diff --git a/malik/all_insights.json b/malik/all_insights.json index 27f6e19..d9f7459 100644 --- a/malik/all_insights.json +++ b/malik/all_insights.json @@ -2,26 +2,26 @@ "ABDULMALEEK-FINAL-YEAR-PROJECT": { "author_metadata": "Abdul-Malik Abdullahi Mustapha. A final year Mechatronics Engineering student at the Federal University of Minna, Nigeria.", "source_metadata": "Abdul-Malik final year project", - "knowledge": [ + "insights": [ { "type": "fact", "insight": "Document title: Artificial Intelligence-Enabled Multi-Function Activity Monitoring and Reporting System", "content": "ARTIFICIAL INTELLIGENCE-ENABLED MULTI-FUNCTION ACTIVITY MONITORING AND REPORTING SYSTEM", "attributes": [ { - "attribute": "section", + "name": "section", "value": "title page" }, { - "attribute": "page_number", + "name": "page_number", "value": "1" }, { - "attribute": "document_id", + "name": "document_id", "value": "2016/1/60695ET" }, { - "attribute": "author_line", + "name": "author_line", "value": "MUSTAPHA, ABDUL-MALIK ABDULLAHI" } ] @@ -32,7 +32,7 @@ "content": "MUSTAPHA, ABDUL-MALIK ABDULLAHI 2016/1/60695ET", "attributes": [ { - "attribute": "author_line", + "name": "author_line", "value": "MUSTAPHA, ABDUL-MALIK ABDULLAHI" } ] @@ -43,11 +43,11 @@ "content": "MUSTAPHA, ABDUL-MALIK ABDULLAHI", "attributes": [ { - "attribute": "author_last_name", + "name": "author_last_name", "value": "Mustapha" }, { - "attribute": "author_given_names", + "name": "author_given_names", "value": "Abdul-Malik Abdullahi" } ] @@ -58,19 +58,19 @@ "content": "DEPARTMENT OF MECHATRONICS ENGINEERING SCHOOL OF ELECTRICAL ENGINEERING AND TECHNOLOGY, FEDERAL UNIVERSITY OF TECHNOLOGY, MINNA, NIGER STATE", "attributes": [ { - "attribute": "department", + "name": "department", "value": "Mechatronics Engineering" }, { - "attribute": "school", + "name": "school", "value": "Electrical Engineering and Technology (School)" }, { - "attribute": "institution", + "name": "institution", "value": "Federal University of Technology, Minna" }, { - "attribute": "location", + "name": "location", "value": "Minna, Niger State" } ] @@ -81,7 +81,7 @@ "content": "FEBRUARY 2022", "attributes": [ { - "attribute": "date_published", + "name": "date_published", "value": "February 2022" } ] @@ -92,7 +92,7 @@ "content": "1", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "1" } ] @@ -103,11 +103,11 @@ "content": "ARTIFICIAL INTELLIGENCE-ENABLED MULTI-FUNCTION ACTIVITY MONITORING AND REPORTING SYSTEM MUSTAPHA, ABDUL-MALIK ABDULLAHI 2016/1/60695ET DEPARTMENT OF MECHATRONICS ENGINEERING SCHOOL OF ELECTRICAL ENGINEERING AND TECHNOLOGY, FEDERAL UNIVERSITY OF TECHNOLOGY, MINNA, NIGER STATE FEBRUARY 2022", "attributes": [ { - "attribute": "section", + "name": "section", "value": "title page" }, { - "attribute": "document_type_guess", + "name": "document_type_guess", "value": "thesis/dissertation/project report title page" } ] @@ -118,31 +118,31 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) 
at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "project_title", + "name": "project_title", "value": "Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System" }, { - "attribute": "source", + "name": "source", "value": "Page 2 of 56" }, { - "attribute": "author", + "name": "author", "value": "Mustapha Abdul-malik Abdullahi" }, { - "attribute": "author_id", + "name": "author_id", "value": "2016/1/60695ET" }, { - "attribute": "degree", + "name": "degree", "value": "Bachelor of Engineering (B.Eng.)" }, { - "attribute": "university", + "name": "university", "value": "Federal University of Technology, Minna" } ] @@ -153,23 +153,23 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "author", + "name": "author", "value": "Mustapha Abdul-malik Abdullahi" }, { - "attribute": "author_id", + "name": "author_id", "value": "2016/1/60695ET" }, { - "attribute": "source", + "name": "source", "value": "Page 2 of 56" }, { - "attribute": "note", + "name": "note", "value": "Part of header content confirming authorship" } ] @@ -180,15 +180,15 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "degree", + "name": "degree", "value": "Bachelor of Engineering (B.Eng.)" }, { - "attribute": "university", + "name": "university", "value": "Federal University of Technology, Minna" } ] @@ -199,11 +199,11 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "note", + "name": "note", "value": "Satisfies rules for degree award; authorized for knowledge and literary presentation" } ] @@ -214,19 +214,19 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) 
at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation. Engr. K.E. Jack ……………………… Project Supervisor Signature and Date Prof. J.G. Kolo …………………... Head of Department. Signature and Date ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "supervisor", + "name": "supervisor", "value": "Engr. K.E. Jack" }, { - "attribute": "hod", + "name": "hod", "value": "Prof. J.G. Kolo" }, { - "attribute": "signature_lines", + "name": "signature_lines", "value": "Signature and Date" } ] @@ -237,11 +237,11 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation. Engr. K.E. Jack ……………………… Project Supervisor Signature and Date Prof. J.G. Kolo …………………... Head of Department. Signature and Date ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "page_number", + "name": "page_number", "value": "2 of 56" } ] @@ -252,11 +252,11 @@ "content": "CERTIFICATION This project, \"Artificial Intelligence-Enabled Multi-function Activity Monitoring and Reporting System\" by Mustapha Abdul-malik Abdullahi (2016/1/60695ET), satisfies the rules regulating the award of the Degree of Bachelor of Engineering (B.Eng.) at the Federal University of Technology, Minna, and it is authorized for the contribution of scientific knowledge and literary presentation. Engr. K.E. Jack ……………………… Project Supervisor Signature and Date Prof. J.G. Kolo …………………... Head of Department. 
Signature and Date ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Certification page" }, { - "attribute": "signature_lines", + "name": "signature_lines", "value": "Signature and Date" } ] @@ -267,23 +267,23 @@ "content": "DEDICATION This project is dedicated to Allah for His faithfulness to me during my university years and to everyone who has helped in any manner over the years.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Dedication" }, { - "attribute": "page_number", + "name": "page_number", "value": "3" }, { - "attribute": "tone", + "name": "tone", "value": "devotional/religious" }, { - "attribute": "topic", + "name": "topic", "value": "Project dedication text" }, { - "attribute": "source_document", + "name": "source_document", "value": "Document page 3 of 56" } ] @@ -294,15 +294,15 @@ "content": "DEDICATION This project is dedicated to Allah for His faithfulness to me during my university years and to everyone who has helped in any manner over the years.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "DEDICATION" }, { - "attribute": "source_page", + "name": "source_page", "value": "4" }, { - "attribute": "author", + "name": "author", "value": "thesis author" } ] @@ -313,11 +313,11 @@ "content": "All thanks to Allah (S.W.T) for being Al-Rahman (The Merciful) and Al-Fattah (The Opener) during the more than five years I spent at the Federal University of Technology in Minna.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" }, { - "attribute": "source_page", + "name": "source_page", "value": "4" } ] @@ -328,15 +328,15 @@ "content": "I wish to express my profound gratitude to Engr. Dr. K. E. Jack, my supervisor, for the mentorship he provided as both my instructor and my boss.", "attributes": [ { - "attribute": "person", + "name": "person", "value": "Engr. Dr. K. E. Jack" }, { - "attribute": "role", + "name": "role", "value": "supervisor/instructor/mentor" }, { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" } ] @@ -347,11 +347,11 @@ "content": "I'm appreciative My level advisers from 100 level to 500 level, Dr. Muhammad Enagi Bima, Dr. T. A. Folorunso, and Engr. Justice C. Annunso, true heroes as well for their support and counsel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" }, { - "attribute": "subjects", + "name": "subjects", "value": "level advisers (100 to 500 level)" } ] @@ -362,15 +362,15 @@ "content": "I also want to thank the Head of the Department, Prof. J.G. Kolo, for his tireless efforts to make the department an outstanding one.", "attributes": [ { - "attribute": "person", + "name": "person", "value": "Prof. J.G. Kolo" }, { - "attribute": "role", + "name": "role", "value": "Head of Department" }, { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" } ] @@ -381,11 +381,11 @@ "content": "And to my amazing parents, I'd want to express my gratitude for always being there for me, for making this trip possible, and for their love, prayers, and support.", "attributes": [ { - "attribute": "relation", + "name": "relation", "value": "parents" }, { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" } ] @@ -396,11 +396,11 @@ "content": "I really thank you for this and everything else. 
Thank you to Sule, Sister, Musty, and the rest of my loving family and friends for being such a wonderful support system.", "attributes": [ { - "attribute": "subject", + "name": "subject", "value": "Sule, Sister, Musty, family and friends" }, { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" } ] @@ -411,11 +411,11 @@ "content": "Your role, whether direct or indirect, cannot be understated.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "ACKNOWLEDGMENT" }, { - "attribute": "subject", + "name": "subject", "value": "Head of Department" } ] @@ -426,11 +426,11 @@ "content": "The page shows spacing and punctuation inconsistencies as seen in the scanned text (e.g., stray line breaks and spacing).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "FORMAT" }, { - "attribute": "source_page", + "name": "source_page", "value": "4" } ] @@ -441,19 +441,19 @@ "content": "ACKNOWLEDGMENT All thanks to Allah (S.W.T) for being Al-Rahman (The Merciful) and Al-Fattah (The Opener) during the more than five years I spent at the Federal University of Technology in Minna. All glory is due to Him.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "page", + "name": "page", "value": "5" }, { - "attribute": "tone", + "name": "tone", "value": "religious/reverential" }, { - "attribute": "source", + "name": "source", "value": "Document content" } ] @@ -464,15 +464,15 @@ "content": "I wish to express my profound gratitude to Engr. Dr. K. E. Jack, my supervisor, for the mentorship he provided as both my instructor and my boss.", "attributes": [ { - "attribute": "person", + "name": "person", "value": "Engr. Dr. K. E. Jack" }, { - "attribute": "role", + "name": "role", "value": "supervisor, mentor, instructor, boss" }, { - "attribute": "section", + "name": "section", "value": "Acknowledgment" } ] @@ -483,19 +483,19 @@ "content": "I'm appreciative My level advisers from 100 level to 500 level, Dr. Muhammad Enagi Bima, Dr. T. A. Folorunso, and Engr. Justice C. Annunso, true heroes as well for their support and counsel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "people", + "name": "people", "value": "Dr. Muhammad Enagi Bima; Dr. T. A. Folorunso; Engr. Justice C. Annunso" }, { - "attribute": "roles", + "name": "roles", "value": "level advisers" }, { - "attribute": "source", + "name": "source", "value": "Document content" } ] @@ -506,19 +506,19 @@ "content": "I also want to thank the Head of the Department, Prof. J.G. Kolo, for his tireless efforts to make the department an outstanding one.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "person", + "name": "person", "value": "Prof. J.G. Kolo" }, { - "attribute": "role", + "name": "role", "value": "Head of Department" }, { - "attribute": "tone", + "name": "tone", "value": "positive/commendatory" } ] @@ -529,15 +529,15 @@ "content": "Your role, whether direct or indirect, cannot be understated.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "content", + "name": "content", "value": "Your role, whether direct or indirect, cannot be understated." 
}, { - "attribute": "tone", + "name": "tone", "value": "positive/commendatory" } ] @@ -548,11 +548,11 @@ "content": "And to my amazing parents, I'd want to express my gratitude for always being there for me, for making this trip possible, and for their love, prayers, and support.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "relation", + "name": "relation", "value": "parents" } ] @@ -563,11 +563,11 @@ "content": "Thank you to Sule, Sister, Musty, and the rest of my loving family and friends for being such a wonderful support system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Acknowledgment" }, { - "attribute": "relation", + "name": "relation", "value": "family and friends" } ] @@ -578,19 +578,19 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n \nstandards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nmust\n \nhave\n \na\n monitor­ing\n \nsystem\n \nas\n it\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \nthe\n \nuser\n \ntogether\n \nw ith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \t the\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \n \n\n the\n \nnecessity\n \nfor\n \nan\n \neffective\n \ntechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \nbetween\n \frames\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \nposes\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \nthe\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \nactivity\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \nthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \nbecomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n 
\nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "component", + "name": "component", "value": "Raspberry Pi" }, { - "attribute": "role", + "name": "role", "value": "microcontroller" }, { - "attribute": "source", + "name": "source", "value": "Page 6 (Abstract)" } ] @@ -601,15 +601,15 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \nthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "feature", + 
"name": "feature", "value": "mobile monitoring app with alerting" }, { - "attribute": "video_recording", + "name": "video_recording", "value": "user-controlled" } ] @@ -620,15 +620,15 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "topic", + "name": "topic", "value": "CV/AI influence on improvements" }, { - "attribute": "tone", + "name": "tone", "value": "positive" } ] @@ -639,15 +639,15 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "model", + "name": "model", "value": "CNN for object detection" }, { - "attribute": "dataset", + "name": "dataset", "value": "COCO" } ] @@ -658,15 +658,15 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "model", + "name": "model", "value": "CNN for object detection" }, { - "attribute": "dataset", + "name": "dataset", "value": "COCO" } ] @@ -677,11 +677,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "topic", + "name": "topic", "value": "Observation of detection effectiveness" } ] @@ -692,11 +692,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "summary", + "name": "summary", "value": "real-time identifications" } ] @@ -707,11 +707,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "summary", + "name": "summary", "value": "clever and dependable after testing" } ] @@ -722,11 +722,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "section", + "name": "section", "value": "Abstract" } ] @@ -737,11 +737,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \nit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nand\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "model", + "name": "model", "value": "CNN for object detection on COCO" } ] @@ -752,15 +752,15 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nan d\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "models", + "name": "models", "value": "SSD, MobileNet, PoseNet" }, { - "attribute": "task", + "name": "task", "value": "object tracking/pose estimation" } ] @@ -771,11 +771,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nan d\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "section", + "name": "section", "value": "Abstract" } ] @@ -786,11 +786,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of \nefficient\n \n data\n has\n \naltered\n \nperformance\n standards\n \nin\nterms\n \nof\n \naccuracy\n \nand\n \nspeed.\n \nEvery\n \nhospital\n \nand\n \nhousehold\n \nis\n \nhave\n \na\n \nmonitoring\n \nsystem\n \nas\n \tit\n \nis\n \nso\n \ncrucial.\n \nAs\n \na\n \nRaspberry\n \nPi\n \nis\n \nused\n \nin\n \nthe\n \nsystem's\n \narchitecture\n \nas\n \the\n \nmicrocontroller,\n \nwhile\n \ta\n \nmobile\n \napplication\n \nis\n \ndeveloped\n \nfor\n \nmonitoring\n \nactivities\n \ndetected\n \nin\n \nthe\n \nsystem\n \nto\n \nalert\n \tthe\n \nuser\n \ntogether\n \nwith\n \na\n \nuser-controlled\n \nvideo\n \nrecording\n \nsystem.\n \nThe\n \nimprovement\n \nmay\n \nbe\n \nseen\n \nsince\n \ndata\n \nprocessing\n \nwas\n \nhandled\n \nby\n \ntwo\n \ncurrent\n \nindustry\n \nbuzzwords,\n \nComputer\n \nVision\n \n(CV)\n \nand\n \nArtificial\n \nIntelligence\n \n(AI).\n \nTwo\n \ntechnologies\n \nhave\n \nmade\n \nit\n \npossible\n \nto\n \ndo\n \nimportant\n \njobs\n \nlike\n \nobject\n \ndetecting\n \nsystems.\n \nAs\n \nthe\n \nnumber\n \nof\n \nfeatures\n \nin\n \na\n \npicture\n \ngrows,\n \nso\n \ndoes\n \nthe\n \nnecessity\n \nfor\n \nan\n \neffective\n \ttechnique\n \nto\n \nextract\n \nhidden\n \ninformation.\n \nThe\n \nCNN\n \nmodel\n \nis\n \nintended\n \nfor\n \nobject\n \ndetection\n \non\n \nthe\n \nCOCO\n \ndataset.\n \nAccuracy\n \nand\n \nprecision\n \nare\n \ntwo\n \nperformance\n \nmeasures\n \nthat\n \nare\n \nused\n \nto\n \nevaluate\n \nand\n \nverify\n \nthe\n \nmodel's\n \nperformance.\n \nSingle\n \nShot\n \nDetection\n \n(SSD),\n \nmobileNet,\n \nand\n \nPosenet\n \nare\n \nused\n \nto\n \nfollow\n \nobjects\n \bin\n \nframes\n \nin\n \norder\n \nto\n \nestimate\n \ntheir\n \npos es\n \nvia\n \nthe\n \nvideo\n \nfeed.\n \nIt\n \nwas\n \nobserved\n \nthat\n \ndetection\n \nwas\n \neffective\n \nand\n \nefficient.\n \nFor\n \nreal-time\n \ndetection\n \nand\n \nrecognition\n \napplications,\n \tthe\n \nalgorithms\n \nprovide\n \nreal-time,\n \naccurate,\n \nexact\n \nidentifications.\n \nAccording\n \ntest\n \nfindings,\n \nthe\n \nsystem\n \ncan\n \ncollect\n \nan\n \naverage\n \nof\n \n10.7\n \nframes\n \nper\n \nsecond\n \nand\n \nhas\n \na\n \ncamera\n \nmovement\n \nreaction\n \ntime\n \nof\n \nless\n \nthan\n \none\n \nsecond\n \nwhen\n \nthere\n \nare\n \nno\n \nmore\n \nthan\n \n2\n \nclients\n \nin\n \na\n \nnetwork.\n \nThe\n \nmonitoring\n \nsystem\n \becomes\n \na\n \nclever\n \nan d\n \ndependable\n \nsystem\n \nafter\n \nsuccessful\n \ntesting\n \nof\n \nthe\n \nrecording\n \nand\n \nmotion\n \ndetecting\n \nfeatures.\n \nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural \n \nNetwork;Single\n \nShot\n \nDetection;\n \nMobileNet;\n \nObject\n \ndetection;\n \nRasberry-Pi.\n \n \n \n6 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "note", + "name": "note", "value": "misspelling Raspberry Pi as Rasberry-Pi" } ] @@ -801,11 +801,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of efficient data has altered performance standards in terms of accuracy and speed. Every hospital and household is to have a monitoring system, as it is so crucial. A Raspberry Pi is used in the system's architecture as the microcontroller, while a mobile application is developed for monitoring activities detected in the system to alert the user, together with a user-controlled video recording system. The improvement may be seen since data processing was handled by two current industry buzzwords, Computer Vision (CV) and Artificial Intelligence (AI). The two technologies have made it possible to do important jobs like object detection systems. As the number of features in a picture grows, so does the necessity for an effective technique to extract hidden information. The CNN model is intended for object detection on the COCO dataset. Accuracy and precision are two performance measures that are used to evaluate and verify the model's performance. Single Shot Detection (SSD), MobileNet, and PoseNet are used to follow objects in frames in order to estimate their poses via the video feed. It was observed that detection was effective and efficient. For real-time detection and recognition applications, the algorithms provide real-time, accurate, exact identifications. According to test findings, the system can collect an average of 10.7 frames per second and has a camera movement reaction time of less than one second when there are no more than 2 clients in a network. The monitoring system becomes a clever and dependable system after successful testing of the recording and motion detecting features.\nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural Network; Single Shot Detection; MobileNet; Object detection; Rasberry-Pi.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -816,11 +816,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of efficient data has altered performance standards in terms of accuracy and speed. Every hospital and household is to have a monitoring system, as it is so crucial. A Raspberry Pi is used in the system's architecture as the microcontroller, while a mobile application is developed for monitoring activities detected in the system to alert the user, together with a user-controlled video recording system. The improvement may be seen since data processing was handled by two current industry buzzwords, Computer Vision (CV) and Artificial Intelligence (AI). The two technologies have made it possible to do important jobs like object detection systems. As the number of features in a picture grows, so does the necessity for an effective technique to extract hidden information. The CNN model is intended for object detection on the COCO dataset. Accuracy and precision are two performance measures that are used to evaluate and verify the model's performance. Single Shot Detection (SSD), MobileNet, and PoseNet are used to follow objects in frames in order to estimate their poses via the video feed. It was observed that detection was effective and efficient. For real-time detection and recognition applications, the algorithms provide real-time, accurate, exact identifications. According to test findings, the system can collect an average of 10.7 frames per second and has a camera movement reaction time of less than one second when there are no more than 2 clients in a network. The monitoring system becomes a clever and dependable system after successful testing of the recording and motion detecting features.\nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural Network; Single Shot Detection; MobileNet; Object detection; Rasberry-Pi.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "attribute", + "name": "attribute", "value": "Raspberry Pi misspelled as Rasberry-Pi" } ] @@ -831,11 +831,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of efficient data has altered performance standards in terms of accuracy and speed. Every hospital and household is to have a monitoring system, as it is so crucial. A Raspberry Pi is used in the system's architecture as the microcontroller, while a mobile application is developed for monitoring activities detected in the system to alert the user, together with a user-controlled video recording system. The improvement may be seen since data processing was handled by two current industry buzzwords, Computer Vision (CV) and Artificial Intelligence (AI). The two technologies have made it possible to do important jobs like object detection systems. As the number of features in a picture grows, so does the necessity for an effective technique to extract hidden information. The CNN model is intended for object detection on the COCO dataset. Accuracy and precision are two performance measures that are used to evaluate and verify the model's performance. Single Shot Detection (SSD), MobileNet, and PoseNet are used to follow objects in frames in order to estimate their poses via the video feed. It was observed that detection was effective and efficient. For real-time detection and recognition applications, the algorithms provide real-time, accurate, exact identifications. According to test findings, the system can collect an average of 10.7 frames per second and has a camera movement reaction time of less than one second when there are no more than 2 clients in a network. The monitoring system becomes a clever and dependable system after successful testing of the recording and motion detecting features.\nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural Network; Single Shot Detection; MobileNet; Object detection; Rasberry-Pi.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "note", + "name": "note", "value": "additional context" } ] @@ -846,11 +846,11 @@ "content": "ABSTRACT In the modern technological civilization, data is the new oil. 
The influence of efficient data has altered performance standards in terms of accuracy and speed. Every hospital and household is to have a monitoring system, as it is so crucial. A Raspberry Pi is used in the system's architecture as the microcontroller, while a mobile application is developed for monitoring activities detected in the system to alert the user, together with a user-controlled video recording system. The improvement may be seen since data processing was handled by two current industry buzzwords, Computer Vision (CV) and Artificial Intelligence (AI). The two technologies have made it possible to do important jobs like object detection systems. As the number of features in a picture grows, so does the necessity for an effective technique to extract hidden information. The CNN model is intended for object detection on the COCO dataset. Accuracy and precision are two performance measures that are used to evaluate and verify the model's performance. Single Shot Detection (SSD), MobileNet, and PoseNet are used to follow objects in frames in order to estimate their poses via the video feed. It was observed that detection was effective and efficient. For real-time detection and recognition applications, the algorithms provide real-time, accurate, exact identifications. According to test findings, the system can collect an average of 10.7 frames per second and has a camera movement reaction time of less than one second when there are no more than 2 clients in a network. The monitoring system becomes a clever and dependable system after successful testing of the recording and motion detecting features.\nKeywords: Artificial Intelligence; Computer Vision; Convolution Neural Network; Single Shot Detection; MobileNet; Object detection; Rasberry-Pi.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Abstract" }, { - "attribute": "note", + "name": "note", "value": "misspelling in keywords Rasberry-Pi" } ] @@ -861,15 +861,15 @@ "content": "2.6 Summary of the Reviewed Literatures ........................................................... 11 CHAPTER THREE .................................................................................................. 13 3.0 METHODOLOGY ............................................................................................. 13 3.1 Overview ........................................................................................................ 
13 3.2 Materials ......................................................................................................... 13 3.2.1 Software Materials with description ........................................................ 13 3.2.2 Hardware Materials with description ...................................................... 15 3.3 Method ........................................................................................................... 16 3.3.1 Design of a multi-function activity monitoring and reporting system .... 17 3.3.2 Design of a centralized database architecture with storage scheme for multiple activity tracking and reporting system .......................................... 23 3.3.3 Design of a mobile application for managing monitored and reported activites ............................................................................................................. 23 3.4 Bill of Engineering Measurement and Evaluation (BEME) .......................... 25 Major Research Methods and its Findings ........................................................... 25 CHAPTER FOUR .................................................................................................... 26 4.0 RESULT AND DISCUSSION ............................................................................ 26 4.1 Overview ........................................................................................................ 26 4.2 Results and Discussions for the multi-function activity monitoring and reporting system ................................................................................................... 26 4.2.1 Motion Detection ..................................................................................... 27 4.2.2 Posture Recognition ................................................................................. 29 4.3 Results and Discussion for the centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 31 4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ....................................................................................... 
38", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Chapter 3 - Methodology (TOC)" }, { - "attribute": "page", + "name": "page", "value": "7" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -880,15 +880,15 @@ "content": "4.0 RESULT AND DISCUSSION ............................................................................ 26 4.1 Overview ........................................................................................................ 26 4.2 Results and Discussions for the multi-function activity monitoring and reporting system ................................................................................................... 26 4.2.1 Motion Detection ..................................................................................... 27 4.2.2 Posture Recognition ................................................................................. 29 4.3 Results and Discussion for the centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 31 4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ....................................................................................... 38", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Chapter 4 - Results and Discussion (TOC)" }, { - "attribute": "page", + "name": "page", "value": "7" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -899,11 +899,11 @@ "content": "2.6 Summary of the Reviewed Literatures ........................................................... 11 CHAPTER THREE .................................................................................................. 13 3.0 METHODOLOGY ............................................................................................. 13 3.1 Overview ........................................................................................................ 13 3.2 Materials ......................................................................................................... 13 3.2.1 Software Materials with description ........................................................ 13 3.2.2 Hardware Materials with description ...................................................... 15 3.3 Method ........................................................................................................... 
16 3.3.1 Design of a multi-function activity monitoring and reporting system .... 17 3.3.2 Design of a centralized database architecture with storage scheme for multiple activity tracking and reporting system .......................................... 23 3.3.3 Design of a mobile application for managing monitored and reported activites ............................................................................................................. 23 3.4 Bill of Engineering Measurement and Evaluation (BEME) .......................... 25 Major Research Methods and its Findings ........................................................... 25 CHAPTER FOUR .................................................................................................... 26 4.0 RESULT AND DISCUSSION ............................................................................ 26 4.1 Overview ........................................................................................................ 26 4.2 Results and Discussions for the multi-function activity monitoring and reporting system ................................................................................................... 26 4.2.1 Motion Detection ..................................................................................... 27 4.2.2 Posture Recognition ................................................................................. 29 4.3 Results and Discussion for the centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 31 4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ....................................................................................... 38", "attributes": [ { - "attribute": "section", + "name": "section", "value": "TOC Copy" }, { - "attribute": "page", + "name": "page", "value": "7" } ] @@ -914,23 +914,23 @@ "content": "2.6 Summary of the Reviewed Literatures ........................................................... 11 CHAPTER THREE .................................................................................................. 13 3.0 METHODOLOGY ............................................................................................. 13 3.1 Overview ........................................................................................................ 13 3.2 Materials ......................................................................................................... 
13 3.2.1 Software Materials with description ........................................................ 13 3.2.2 Hardware Materials with description ...................................................... 15 3.3 Method ........................................................................................................... 16 3.3.1 Design of a multi-function activity monitoring and reporting system .... 17 3.3.2 Design of a centralized database architecture with storage scheme for multiple activity tracking and reporting system .......................................... 23 3.3.3 Design of a mobile application for managing monitored and reported activites ............................................................................................................. 23 3.4 Bill of Engineering Measurement and Evaluation (BEME) .......................... 25 Major Research Methods and its Findings ........................................................... 25 CHAPTER FOUR .................................................................................................... 26 4.0 RESULT AND DISCUSSION ............................................................................ 26 4.1 Overview ........................................................................................................ 26 4.2 Results and Discussions for the multi-function activity monitoring and reporting system ................................................................................................... 26 4.2.1 Motion Detection ..................................................................................... 27 4.2.2 Posture Recognition ................................................................................. 29 4.3 Results and Discussion for centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 31 4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ...................................................................................... 
38 \n", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Page content/TOC for Chapters 2-4" }, { - "attribute": "page_number", + "name": "page_number", "value": "8" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -941,23 +941,23 @@ "content": "CHAPTER THREE .................................................................................................. 13 3.0 METHODOLOGY ............................................................................................. 13 3.1 Overview ........................................................................................................ 13 3.2 Materials ......................................................................................................... 13 3.2.1 Software Materials with description ........................................................ 13 3.2.2 Hardware Materials with description ...................................................... 15 3.3 Method ........................................................................................................... 16 3.3.1 Design of a multi-function activity monitoring and reporting system .... 17 3.3.2 Design of a centralized database architecture with storage scheme for multiple activity tracking and reporting system .......................................... 23 3.3.3 Design of a mobile application for managing monitored and reported activites ............................................................................................................. 23 3.4 Bill of Engineering Measurement and Evaluation (BEME) .......................... 25 Major Research Methods and its Findings ........................................................... 25", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Chapter 3 – Methodology (TOC)" }, { - "attribute": "page_number", + "name": "page_number", "value": "8" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -968,23 +968,23 @@ "content": "CHAPTER FOUR .................................................................................................... 26 4.0 RESULT AND DISCUSSION ............................................................................ 26 4.1 Overview ........................................................................................................ 26 4.2 Results and Discussions for the multi-function activity monitoring and reporting system ................................................................................................... 26 4.2.1 Motion Detection ..................................................................................... 27 4.2.2 Posture Recognition ................................................................................. 29 4.3 Results and Discussion for centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 
31 4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ...................................................................................... 38 \n", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Chapter 4 – Results and Discussion (TOC)" }, { - "attribute": "page_number", + "name": "page_number", "value": "8" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -995,23 +995,23 @@ "content": "4.3 Results and Discussion for centralized database architecture with storage scheme for multiple activity tracking and reporting system ................................ 29 4.3.1 Firebase Realtime database ..................................................................... 30 4.3.2Firebase Cloud Storage ............................................................................. 31", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Firebase integration in Database section" }, { - "attribute": "page_number", + "name": "page_number", "value": "29-31" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -1022,23 +1022,23 @@ "content": "4.4 Results and Discussions for the mobile application for managing monitored and reported activites. .......................................................................................... 31 4.4.1 Introduction Screen .................................................................................. 32 4.4.2 Login Screen ............................................................................................ 32 4.4.3 Registration Screen .................................................................................. 33 4.4.4 Forget Password Screen ........................................................................... 34 4.4.5 Home Screen ............................................................................................ 35 4.4.6 Create Activity Screen ............................................................................. 36 4.4.7Activities Screen ...................................................................................... 
38 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Mobile App Screens in TOC (4.4)" }, { - "attribute": "page_number", + "name": "page_number", "value": "31-38" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -1049,23 +1049,23 @@ "content": "4.3.2Firebase Cloud Storage ............................................................................. 31 4.4.7Activities Screen ...................................................................................... 38 ", "attributes": [ { - "attribute": "issue", + "name": "issue", "value": "Formatting/typo irregularities (missing space after 4.3.2, misspelling 'activites')" }, { - "attribute": "page_number", + "name": "page_number", "value": "31-38" }, { - "attribute": "source", + "name": "source", "value": "Document TOC excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Neutral" } ] @@ -1076,27 +1076,27 @@ "content": "Figure 3.1:Raspberry Pi 3 Controller 15", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.1" }, { - "attribute": "description", + "name": "description", "value": "Raspberry Pi 3 Controller" }, { - "attribute": "page_number", + "name": "page_number", "value": "15" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1107,27 +1107,27 @@ "content": "Figure. 3.2: Raspberry Pi Camera Module V2 16", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.2" }, { - "attribute": "description", + "name": "description", "value": "Raspberry Pi Camera Module V2" }, { - "attribute": "page_number", + "name": "page_number", "value": "16" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1138,27 +1138,27 @@ "content": "Figure 3.3: AI based multi-function activity monitoring and reporting system 16", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.3" }, { - "attribute": "description", + "name": "description", "value": "AI based multi-function activity monitoring and reporting system" }, { - "attribute": "page_number", + "name": "page_number", "value": "16" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1169,27 +1169,27 @@ "content": "Figure 3.4: Circuit design for multi-function activity monitoring and reporting system 18", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.4" }, { - "attribute": "description", + "name": "description", "value": "Circuit design for multi-function activity monitoring and reporting system" }, { - "attribute": "page_number", + "name": "page_number", "value": "18" }, { - "attribute": "source_page", + "name": 
"source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1200,27 +1200,27 @@ "content": "Figure 3.5: Flowchart for video acquisition 19", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.5" }, { - "attribute": "description", + "name": "description", "value": "Flowchart for video acquisition" }, { - "attribute": "page_number", + "name": "page_number", "value": "19" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1231,27 +1231,27 @@ "content": "Figure 3.6: Flow chart for detecting and recognizing activities 20", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.6" }, { - "attribute": "description", + "name": "description", "value": "Flow chart for detecting and recognizing activities" }, { - "attribute": "page_number", + "name": "page_number", "value": "20" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1262,27 +1262,27 @@ "content": "Figure 3.7: Single Shot Detector (SSD) Framework 21", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.7" }, { - "attribute": "description", + "name": "description", "value": "Single Shot Detector (SSD) Framework" }, { - "attribute": "page_number", + "name": "page_number", "value": "21" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1293,27 +1293,27 @@ "content": "Figure 3.8: Flowchart for multi-function activity monitoring and reporting system 22", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.8" }, { - "attribute": "description", + "name": "description", "value": "Flowchart for multi-function activity monitoring and reporting system" }, { - "attribute": "page_number", + "name": "page_number", "value": "22" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1324,27 +1324,27 @@ "content": "Figure 3.8: Centralized database architecture with storage scheme for multiple activity tracking and reporting system 23", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.8 (2)" }, { - "attribute": "description", + "name": "description", "value": "Centralized database architecture with storage scheme for multiple activity tracking and reporting system" }, { - "attribute": "page_number", + "name": "page_number", "value": "23" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1355,27 +1355,27 @@ "content": "Figure 3.9: Illustration of React Navigation 25", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": 
"figure_label", "value": "Figure 3.9" }, { - "attribute": "description", + "name": "description", "value": "Illustration of React Navigation" }, { - "attribute": "page_number", + "name": "page_number", "value": "25" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1386,27 +1386,27 @@ "content": "Figure 4.1: Motion detection directly from testing the Pi Camera on desk 27", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.1" }, { - "attribute": "description", + "name": "description", "value": "Motion detection directly from testing the Pi Camera on desk" }, { - "attribute": "page_number", + "name": "page_number", "value": "27" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1417,27 +1417,27 @@ "content": "Figure 4.2: Motion detection from Raspberry Pi Camera placed at the top 28", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.2" }, { - "attribute": "description", + "name": "description", "value": "Motion detection from Raspberry Pi Camera placed at the top" }, { - "attribute": "page_number", + "name": "page_number", "value": "28" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1448,27 +1448,27 @@ "content": "Figure 4.3: Body Posture Detection of a person sitting down 29", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.3" }, { - "attribute": "description", + "name": "description", "value": "Body Posture Detection of a person sitting down" }, { - "attribute": "page_number", + "name": "page_number", "value": "29" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1479,27 +1479,27 @@ "content": "Figure 4.5: Activities collection table on Firebase 30", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.5" }, { - "attribute": "description", + "name": "description", "value": "Activities collection table on Firebase" }, { - "attribute": "page_number", + "name": "page_number", "value": "30" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1510,27 +1510,27 @@ "content": "Figure 4.6: Camera collection table on Firebase 30", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.6" }, { - "attribute": "description", + "name": "description", "value": "Camera collection table on Firebase" }, { - "attribute": "page_number", + "name": "page_number", "value": "30" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1541,27 +1541,27 @@ "content": "Figure 4.7: Tracked collection table 
on Firebase 31", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.7" }, { - "attribute": "description", + "name": "description", "value": "Tracked collection table on Firebase" }, { - "attribute": "page_number", + "name": "page_number", "value": "31" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1572,27 +1572,27 @@ "content": "Figure 4.7: Firebase Cloud Storage 31", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.7 (2)" }, { - "attribute": "description", + "name": "description", "value": "Firebase Cloud Storage" }, { - "attribute": "page_number", + "name": "page_number", "value": "31" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1603,27 +1603,27 @@ "content": "Figure 4.8: Introduction Screen 32", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.8" }, { - "attribute": "description", + "name": "description", "value": "Introduction Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "32" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1634,27 +1634,27 @@ "content": "Figure 4.9: Login Screen 33", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.9" }, { - "attribute": "description", + "name": "description", "value": "Login Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "33" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1665,31 +1665,31 @@ "content": "Figure 4.10: Registration Screen 34", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "attribute_description", + "name": "attribute_description", "value": "N/A" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.10" }, { - "attribute": "description", + "name": "description", "value": "Registration Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "34" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1700,27 +1700,27 @@ "content": "Figure 4.11: Forgot Password Screen 35", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.11" }, { - "attribute": "description", + "name": "description", "value": "Forgot Password Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "35" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1731,27 +1731,27 @@ "content": "Figure 4.12: Home Screen 36", "attributes": [ { - "attribute": 
"section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.12" }, { - "attribute": "description", + "name": "description", "value": "Home Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "36" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1762,27 +1762,27 @@ "content": "Figure 3.8: Flowchart for multi-function activity monitoring and reporting system 22", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 3.8" }, { - "attribute": "description", + "name": "description", "value": "Flowchart for multi-function activity monitoring and reporting system" }, { - "attribute": "page_number", + "name": "page_number", "value": "22" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1793,27 +1793,27 @@ "content": "Figure 4.12: Home Screen 36", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "figure_label", + "name": "figure_label", "value": "Figure 4.12" }, { - "attribute": "description", + "name": "description", "value": "Home Screen" }, { - "attribute": "page_number", + "name": "page_number", "value": "36" }, { - "attribute": "source_page", + "name": "source_page", "value": "9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -1824,23 +1824,23 @@ "content": "LIST OF FIGURES Figure 3.1:Raspberry Pi 3 Controller 15 Figure. 3.2: Raspberry Pi Camera Module V2 16 Figure 3.3: AI based multi-function activity monitoring and reporting system 16 Figure 3.4: Circuit design for multi-function activity monitoring and reporting system 18 Figure 3.5: Flowchart for video acquisition 19 Figure 3.6: Flow chart for detecting and recognizing activities 20 Figure 3.7: Single Shot Detector (SSD) Framework 21 Figure 3.8: Flowchart for multi-function activity monitoring and reporting system 22 Figure 3.8: Centralized database architecture with storage scheme for multiple activity tracking and reporting system 23 Figure 3.9: Illustration of React Navigation 25 Figure 4.1: Motion detection directly from testing the Pi Camera on desk 27 Figure 4.2: Motion detection from Raspberry Pi Camera placed at the top 28 Figure 4.3: Body Posture Detection of a person sitting down 29 Figure 4.5: Activities collection table on Firebase 30 Figure 4.6: Camera collection table on Firebase 30 Figure 4.7: Tracked collection table on Firebase 31 Figure 4.7: Firebase Cloud Storage 31 Figure 4.8: Introduction Screen 32 Figure 4.9: Login Screen 33 Figure 4.10: Registration Screen 34 Figure 4.11: Forgot Password Screen 35 Figure 4.12: Home Screen 36", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "page", + "name": "page", "value": "10" }, { - "attribute": "range", + "name": "range", "value": "3.1-4.12" }, { - "attribute": "notes", + "name": "notes", "value": "Two entries labelled Figure 3.8; 4.7 appears twice; minor typographical spacing differences present." }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1851,15 +1851,15 @@ "content": "Figure 3.1:Raspberry Pi 3 Controller 15 Figure. 
3.2: Raspberry Pi Camera Module V2 16 Figure 3.3: AI based multi-function activity monitoring and reporting system 16 Figure 3.4: Circuit design for multi-function activity monitoring and reporting system 18", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "range", + "name": "range", "value": "3.1-3.4" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1870,15 +1870,15 @@ "content": "Figure 3.5: Flowchart for video acquisition 19 Figure 3.6: Flow chart for detecting and recognizing activities 20 Figure 3.7: Single Shot Detector (SSD) Framework 21", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "range", + "name": "range", "value": "3.5-3.7" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1889,15 +1889,15 @@ "content": "Figure 3.8: Flowchart for multi-function activity monitoring and reporting system 22 Figure 3.8: Centralized database architecture with storage scheme for multiple activity tracking and reporting system 23 Figure 3.9: Illustration of React Navigation 25", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "range", + "name": "range", "value": "3.8-3.9" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1908,15 +1908,15 @@ "content": "Figure 4.1: Motion detection directly from testing the Pi Camera on desk 27 Figure 4.2: Motion detection from Raspberry Pi Camera placed at the top 28 Figure 4.3: Body Posture Detection of a person sitting down 29 Figure 4.5: Activities collection table on Firebase 30 Figure 4.6: Camera collection table on Firebase 30 Figure 4.7: Tracked collection table on Firebase 31 Figure 4.7: Firebase Cloud Storage 31 Figure 4.8: Introduction Screen 32 Figure 4.9: Login Screen 33 Figure 4.10: Registration Screen 34 Figure 4.11: Forgot Password Screen 35 Figure 4.12: Home Screen 36", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "range", + "name": "range", "value": "4.1-4.12" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1927,15 +1927,15 @@ "content": "There appears to be numbering duplications (e.g., two entries labeled Figure 3.8 and two entries labeled Figure 4.7) and spacing differences in captions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "page", + "name": "page", "value": "10" }, { - "attribute": "confidence", + "name": "confidence", "value": "Medium" } ] @@ -1946,15 +1946,15 @@ "content": "LIST OF FIGURES ... Raspberry Pi 3 Controller ... React Navigation Introduction Screen ... 
Home Screen", "attributes": [ { - "attribute": "section", + "name": "section", "value": "List of Figures" }, { - "attribute": "page", + "name": "page", "value": "10" }, { - "attribute": "confidence", + "name": "confidence", "value": "High" } ] @@ -1965,11 +1965,11 @@ "content": "The Artificial Intelligence-enabled multi-function activity monitoring and reporting system is an intelligent device that uses deep learning and computer vision to capture video feeds of a scene, analyse it, predict several activities and report if instructed to do so.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.1 Background Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -1980,11 +1980,11 @@ "content": "Artificial Intelligence (AI) is a wide branch that builds smart machines capable of performing tasks like humans or even better.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.1 Background Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -1995,11 +1995,11 @@ "content": "Deep learning is a branch of AI that attempt to mimic how the human brain works by using neural networks.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.1 Background Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2010,11 +2010,11 @@ "content": "This eliminates the need for using structured data like machine learning and can therefore work with images and text (Mishra & Gupta, 2017).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.1 Background Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2025,11 +2025,11 @@ "content": "The inability to monitor and report several activities at a time considering the present world security challenges necessitates the development of the multi-faceted system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.2 Problem Statement" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2040,11 +2040,11 @@ "content": "i. To design a multi-function activity monitoring and reporting system. ii. To design a centralized database architecture with storage scheme for multiple activity tracking and reporting system. iii. To design a mobile application for managing monitored and reported activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2055,11 +2055,11 @@ "content": "The scope of this project covers design, modelling and development of a mobile prototype of Artificial Intelligence-Enabled multi-functional monitoring and reporting System. Activity recognition is done by getting the video feed output from the camera mounted to the device, the output is then processed and passed to a video prediction algorithm to predict the activity at that point in time. The algorithm is based on deep learning which uses Convolutional Neural Networks (CNN) and Regression Neural Networks (RNN). The combination of these two creates a new algorithm known as Faster Regression Convolutional Neural Network (Faster RCNN) which is used to achieve this project. 
However, the envisaged limitations to this project is that of power consumption and large data collection.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2070,11 +2070,11 @@ "content": "Any model fed with a limited amount of data, may not perform well and could lead to poor predictions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2085,11 +2085,11 @@ "content": "The potential sector which could benefit from this project includes Health and Security systems. Patient status can be monitored without the need of a medical personnel with this proposed system. Similarly, several insurgencies and bandit activities could be easily monitored and reported without physically intervention using this AI-enabled multi-functional activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2100,11 +2100,11 @@ "content": "Bandit activities has been one of the major reported issues in society and requires an AI-enabled self-reporting and monitoring system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.2 Problem Statement" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2115,11 +2115,11 @@ "content": "Chapter One is structured into five chapters including Chapter One, Chapter Two, Chapter Three, Chapter Four and Chapter Five.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2130,11 +2130,11 @@ "content": "not only to automatically monitor and report but also to make the users feel more secure about homes, offices and other places.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2145,11 +2145,11 @@ "content": "Chaaraoui et al., 2014; Wang, 2019; Mishra & Gupta, 2017; Tee et al., 2015 are cited in the background and methodology discussions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.1 Background Study / 1.4 Scope and Limitations" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2160,11 +2160,11 @@ "content": "Chapter Four is data presentation and analysis. This Includes results and discussions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "source", + "name": "source", "value": "Chapter 1: Introduction" } ] @@ -2175,19 +2175,19 @@ "content": "The aim of this project is to design and implement an Artificial Intelligent (AI) based multi-function Activity monitoring and reporting system with the following objectives: i. To design a multi-function activity monitoring and reporting system. ii. To design a centralized database architecture with storage scheme for multiple activity tracking and reporting system. iii. 
To design a mobile application for managing monitored and reported activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "source", + "name": "source", "value": "Page 12" }, { - "attribute": "reference", + "name": "reference", "value": "—" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -2198,11 +2198,11 @@ "content": "The scope of this project covers design, modelling and development of a mobile prototype of Artificial Intelligence-Enabled multi-functional monitoring and reporting System. This project focuses on multiple activities and events, which include but are not limited to health, security and surveillance monitoring and reporting systems, using deep learning and computer vision to capture the video feeds and predict activities and report back to the user.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2213,11 +2213,11 @@ "content": "Activity recognition is done by getting the video feed output from the camera mounted to the device; the output is then processed and passed to a video prediction algorithm to predict the activity at that point in time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2228,11 +2228,11 @@ "content": "The algorithm is based on deep learning which uses Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN). The combination of these two creates a new algorithm known as Faster Region-based Convolutional Neural Network (Faster RCNN) which is used to achieve this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2243,11 +2243,11 @@ "content": "However, the envisaged limitations to this project are those of power consumption and large data collection. The device makes use of a power supply to function effectively and the absence of power in the device makes the device inactive.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2258,11 +2258,11 @@ "content": "Moreover, in the field of AI, the amount of data collected, prepared and used for training determines how accurate the predictions can be. 
Any model fed with a limited amount of data may not perform well and could lead to poor predictions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2273,11 +2273,11 @@ "content": "major reported issues in society and require an AI-enabled self-reporting and monitoring system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2288,15 +2288,15 @@ "content": "There’s no known scenario where deep learning is used for activity monitoring with a self-reporting solution, but a different solution was found for monitoring the activities of elderly individuals using sensor and kinematic data (Tee et al., 2015).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" }, { - "attribute": "reference", + "name": "reference", "value": "Tee et al., 2015" } ] @@ -2307,11 +2307,11 @@ "content": "This project focuses on multiple activities and events, which include but are not limited to health, security and surveillance monitoring and reporting systems", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2322,11 +2322,11 @@ "content": "The combination of these two creates a new algorithm known as Faster Region-based Convolutional Neural Network (Faster RCNN) which is used to achieve this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2337,11 +2337,11 @@ "content": "The algorithm is based on deep learning which uses Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN). The combination of these two creates a new algorithm known as Faster Region-based Convolutional Neural Network (Faster RCNN) which is used to achieve this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2352,11 +2352,11 @@ "content": "However, the envisaged limitations to this project are those of power consumption and large data collection. 
The device makes use of a power supply to function effectively and the absence of power in the device makes the device inactive.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations of the Study" }, { - "attribute": "source", + "name": "source", "value": "Page 12" } ] @@ -2367,11 +2367,11 @@ "content": "major reported issues in society and require an AI-enabled self-reporting and monitoring system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Problem statement / motivation" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2382,15 +2382,15 @@ "content": "There’s no known scenario where deep learning is used for activity monitoring with a self-reporting solution, but a different solution was found for monitoring the activities of elderly individuals using sensor and kinematic data (Tee et al., 2015).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Intro / Background" }, { - "attribute": "source", + "name": "source", "value": "Tee et al., 2015" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2401,11 +2401,11 @@ "content": "The aim of this project is to design and implement an Artificial Intelligence (AI)-based multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2416,15 +2416,15 @@ "content": "i. To design a multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "attribute", + "name": "attribute", "value": "objective" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2435,15 +2435,15 @@ "content": "ii. To design a centralized database architecture with storage scheme for multiple activity tracking and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "attribute", + "name": "attribute", "value": "objective" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2454,15 +2454,15 @@ "content": "iii. 
To design a mobile application for managing monitored and reported activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.3 Aim and Objectives" }, { - "attribute": "attribute", + "name": "attribute", "value": "objective" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2473,11 +2473,11 @@ "content": "The scope of this project covers design, modelling and development of a mobile prototype of Artificial Intelligence-Enabled multi-functional monitoring and reporting System.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2488,11 +2488,11 @@ "content": "This project focuses on multiple activities and events, which include but are not limited to health, security and surveillance monitoring and reporting systems, using deep learning and computer vision to capture the video feeds and predict activities and report back to the user.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2503,11 +2503,11 @@ "content": "Activity recognition is done by getting the video feed output from the camera mounted to the device; the output is then processed and passed to a video prediction algorithm to predict the activity at that point in time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2518,11 +2518,11 @@ "content": "The algorithm is based on deep learning which uses Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2533,11 +2533,11 @@ "content": "The combination of these two creates a new algorithm known as Faster Region-based Convolutional Neural Network (Faster RCNN) which is used to achieve this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2548,11 +2548,11 @@ "content": "However, the envisaged limitations to this project are those of power consumption and large data collection.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2563,11 +2563,11 @@ "content": "The device makes use of a power supply to function effectively and the absence of power in the device makes the device inactive.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2578,11 +2578,11 @@ "content": "Moreover, in the field of AI, the amount of data collected, prepared and used for training determines how accurate the predictions can be.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -2593,11 +2593,11 @@ "content": "Any model fed with a limited amount of data may not perform well and could lead to poor predictions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": 
"page", "value": "13" } ] @@ -2608,15 +2608,15 @@ "content": "This project focus is on multiple activities and events which include but not Limited to health, security and surveillance monitoring and reporting system using deep learning and computer vision to capture the video feeds and predict activities and report back to the user.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.4 Scope and Limitations" }, { - "attribute": "page", + "name": "page", "value": "13" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "neutral" } ] @@ -2627,23 +2627,23 @@ "content": "In this project, manual activity monitoring and reporting system is given a phase shift with the use of an AI-enabled multi-function activity recognition monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" }, { - "attribute": "tone", + "name": "tone", "value": "neutral" } ] @@ -2654,19 +2654,19 @@ "content": "The potential sector which could benefit from this project includes Health and Security systems.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -2677,19 +2677,19 @@ "content": "Patient status can be monitored without the need of a medical personnel with this proposed system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -2700,19 +2700,19 @@ "content": "Similarly, several insurgencies and bandit activities could be easily monitored and reported without physically intervention using this AI-enabled multi-functional activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -2723,23 +2723,23 @@ "content": "Since the human attention span is said to be shorter than a goldfish, it’s impossible for an individual of that nature to completely monitor and report several activities at a time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "critical of human capacity" } ] @@ -2750,23 +2750,23 @@ "content": "In view of this, therefore, this project is designed not only to automatically monitor and report but also to make the users feel more secure about homes, offices and other places.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.5 Justification" }, { - 
"attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "positive toward automation" } ] @@ -2777,19 +2777,19 @@ "content": "The project is structured into five chapters which include chapter one, chapter two, chapter three, chapter four and chapter five.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.92" } ] @@ -2800,19 +2800,19 @@ "content": "Chapter One is the Introductory Chapter and covers background study, problem statement, aim and objectives, scope and limitations, justification and structure.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.92" } ] @@ -2823,19 +2823,19 @@ "content": "Chapter Two is the theoretical literature review. This shows the analysis, synthesis and evaluation of the project topic.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.90" } ] @@ -2846,19 +2846,19 @@ "content": "Chapter Three is the research method and methodology. This includes different methods, strategies, procedures and materials used in this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.90" } ] @@ -2869,19 +2869,19 @@ "content": "Chapter Four is the data presentation and analysis. This Includes results and discussions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.90" } ] @@ -2892,19 +2892,19 @@ "content": "Chapter Five includes the conclusion and recommendation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "1.6 Structure of the Study" }, { - "attribute": "page", + "name": "page", "value": "14" }, { - "attribute": "source", + "name": "source", "value": "Page 14 of 56" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.90" } ] @@ -2915,15 +2915,15 @@ "content": "The object detection pipeline has undergone significant evolution thanks to Fast RCNN and Faster RCNN (Deng, 2009; Girshick, 2015). 
Fast/Faster RCNN, a successor of the RCNN, extracts region-independent features using convolutional layers initialized with discriminative pretraining for ImageNet classification, followed by a region-wise multilayer perceptron (MLP) for classification (Ren, et al., 2015). Additionally, they jointly optimize a softmax classifier and bounding-box regressors as opposed to training a softmax classifier, SVMs, and regressors in three different stages. However, this approach is a memory hog because it uses MLP classifier topologies.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.2 Fast-RCNN" }, { - "attribute": "references", + "name": "references", "value": "Deng 2009; Girshick 2015; Ren et al. 2015" }, { - "attribute": "notes", + "name": "notes", "value": "Memory hog due to MLP classifier topologies" } ] @@ -2934,15 +2934,15 @@ "content": "Our core work and contributions consist of the following three areas, which are based on a detailed analysis of the region-wise feature classifier in Fast/Faster RCNN. First, we demonstrate that the concatenation of the convolutional layer and the pooling layer forces the input size of the prevalent fully convolutional architectures to meet a set of requirements. Second, based on a thorough examination of these fully convolutional architectures, we propose a method for incorporating contemporary, cutting-edge image classification networks, such as ResNet and various iterations of GoogleNet, into Fast/Faster RCNN detection systems (He, et al., 2016; Szegedy, et al., 2015). Finally, we implement the concept of skip connection similar to PVANET and the FPN hybrid that combines a number of intermediate outputs. As a result, both high-level semantic information and low-level visual features can be considered simultaneously (Kim, et al., 2016; Lin, et al., 2017).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.2 Fast-RCNN" }, { - "attribute": "references", + "name": "references", "value": "He 2016; Szegedy 2015; Kim 2016; Lin 2017" }, { - "attribute": "notes", + "name": "notes", "value": "Three areas: concatenation, integration of ResNet/GoogleNet, skip connections (PVANET/FPN)" } ] @@ -2953,11 +2953,11 @@ "content": "Finally, we implement the concept of skip connection similar to PVANET and FPN hybrid that combines a number of intermediate outputs. As a result, both high-level semantic information and low-level visual features can be considered simultaneously (Kim, et al., 2016; Lin, et al., 2017).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.2 Fast-RCNN" }, { - "attribute": "concept", + "name": "concept", "value": "Skip connections PVANET/FPN" } ] @@ -2968,11 +2968,11 @@ "content": "Computer vision, as the theory and technology of creating machines, provides the capability to detect, track, and classify objects. They are based on cameras as the main means of obtaining information about specific objects. A striking example of the application of this technology is the use of cameras for object detection, and for recording violations. 
They allow monitoring of the observance of multi-functioning activities without human intervention, and are capable of transmitting this information for due documentation on the configured network.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "notes", + "name": "notes", "value": "Cameras as main info source; enables autonomous monitoring and reporting" } ] @@ -2983,11 +2983,11 @@ "content": "In addition, foreign publications on this topic also indicate great prospects for using this technology for the needs of multi-function activity monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "tone", + "name": "tone", "value": "prospective/optimistic" } ] @@ -2998,15 +2998,15 @@ "content": "Since this technology does not require the regular presence of specialists in the field of monitoring at the facilities, its application is best suited for monitoring the functioning of complex infrastructure facilities that require constant monitoring of their condition, such as human activities, bridges and tunnels, multi-level transport junctions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "use_case", + "name": "use_case", "value": "infrastructure monitoring" }, { - "attribute": "notes", + "name": "notes", "value": "No regular specialist presence required" } ] @@ -3017,11 +3017,11 @@ "content": "The goal of such systems is to reliably and automatically convert image or video data into objective information without direct human intervention.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "goal", + "name": "goal", "value": "automatic data conversion to objective information" } ] @@ -3032,11 +3032,11 @@ "content": "In addition, active work is underway to create and modernize already created complexes based on stationary and mobile cameras.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "projects", + "name": "projects", "value": "stationary and mobile cameras" } ] @@ -3047,11 +3047,11 @@ "content": "Another important area in the development of this technology is the creation of algorithms that allow cameras to recognize and report various kinds of ongoing activities which are complex for", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "note", + "name": "note", "value": "Content truncated on page" } ] @@ -3062,15 +3062,15 @@ "content": "Society is confronting a critical dilemma associated with greater life expectancy and a higher number of persons in dependent circumstances as a result of progress and demographic change.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "topic", + "name": "topic", "value": "demographics / aging society" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3081,15 +3081,15 @@ "content": "As a result, there is a significant demand for personal autonomy support systems.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "topic", + "name": "topic", "value": "assistive autonomy technology" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3100,11 +3100,11 @@ "content": "Chaaraoui et 
al. (2014) developed a vision home system that allows elderly and disabled individuals to live independently at home while receiving care and safety services via vision-based monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3115,11 +3115,11 @@ "content": "The system's specification is also offered, as well as innovative contributions to human behavior analysis and privacy protection.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3130,11 +3130,11 @@ "content": "The behavior recognition method's experimental findings indicate excellent performance, as well as support for multi-view situations and real-time execution, both of which are necessary to provide the suggested services.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3145,11 +3145,11 @@ "content": "The study also discusses computer vision technologies, neural network techniques, and artificial intelligence methodologies in connection with the topic of monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3160,11 +3160,11 @@ "content": "As a result, a graphical representation of the structure of an intelligent system for support and decision-making, as well as a block diagram of a stationary monitoring complex based on video surveillance cameras, was provided.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3175,11 +3175,11 @@ "content": "The practicality and prospects of using such complexes for the monitoring needs of engineering infrastructure facilities, as well as the impact of the development of technologies used in them on global advancement in general, were discussed.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui 2014" } ] @@ -3190,11 +3190,11 @@ "content": "Image processing is a way to convert an image to a digital form and perform certain functions on it, in order to get an enhanced image or extract other useful information from it.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3205,11 +3205,11 @@ "content": "Image processing basically involves the following three steps, which are: Importing an image with an optical scanner or digital photography; Analysis and image management including data compression; and image enhancement and visual detection patterns such as satellite imagery.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3220,11 +3220,11 @@ "content": "The following libraries are involved in performing Image processing in Python: Scikit-image; OpenCV; Mahotas; SimpleITK; SciPy; Pillow; Matplotlib.", "attributes": [ { - "attribute": "section", + "name": "section", 
"value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3235,11 +3235,11 @@ "content": "OpenCV (Open Source Computer Vision Library) among others is one of the most widely used libraries in computer programming. OpenCV-Python is an OpenCV Python API. OpenCV-Python is not only running, because the background has a code written in C/C++, but it is also easy to extract and distribute (due to Python folding in the front).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Page 16-17" } ] @@ -3250,11 +3250,11 @@ "content": "Every home owner should have a monitoring system in place for real-time monitoring of in-house activity. Surya and Ningsih (2019) developed a system that includes a Raspberry Pi as the computing core and an Android-based smartphone for monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Surya & Ningsih 2019" } ] @@ -3265,11 +3265,11 @@ "content": "This system consists of a servo motor that can move the camera horizontally and vertically, a user-controllable video recording system, and a motion detection system that can alert users. System testing is done to obtain a real-time, dependable, and intelligent monitoring system by testing the network in terms of bandwidth characteristics and the number of clients in a network. According to test results with a maximum of two clients in one network, the system can take an average of 10.7 frames per second and has a camera movement response of less than one second.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Computer Vision" }, { - "attribute": "source", + "name": "source", "value": "Surya & Ningsih 2019" } ] @@ -3280,11 +3280,11 @@ "content": "Mohana and Aradhya (2019) talked about how efficient data has impacted performance benchmarks in terms of speed and accuracy.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3-2.4 Computer Vision / AI" }, { - "attribute": "source", + "name": "source", "value": "Mohana & Aradhya 2019" } ] @@ -3295,11 +3295,11 @@ "content": "Computer vision (CV) and artificial intelligence (AI) have also improved data visualization, processing, and analysis. 
Major tasks such as object detection and tracking for traffic vigilance systems have been made possible by this technology.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 Artificial Intelligence" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3310,11 +3310,11 @@ "content": "A convolutional neural network (CNN) model is constructed for single object detection on an urban vehicle dataset, and YOLOv3 is used for multiple object detection on the KITTI and COCO datasets.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 Artificial Intelligence" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3325,11 +3325,11 @@ "content": "Convolutional Neural Networks and deep learning artificial intelligence technologies are quickly evolving, primarily because AI processes large amounts of data much faster and makes predictions more accurately than humanly possible.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 Artificial Intelligence" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3340,11 +3340,11 @@ "content": "As of this writing, the primary disadvantage of using AI was the expense of processing the large amounts of data that AI programming requires (Indolia, et al., 2018).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 Artificial Intelligence" }, { - "attribute": "source", + "name": "source", "value": "Indolia 2018" } ] @@ -3355,11 +3355,11 @@ "content": "In the current age of the fourth industrial revolution, the digital world has a wealth of data, such as Internet of Things (IoT) data, cybersecurity data, mobile data, business data, social media data, and health data.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 Artificial Intelligence" }, { - "attribute": "source", + "name": "source", "value": "Page 16" } ] @@ -3370,11 +3370,11 @@ "content": "Society is confronting a critical dilemma associated with greater life expectancy and a higher number of persons in dependent circumstances as a result of progress and demographic change.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Introduction / Societal context" }, { - "attribute": "source", + "name": "source", "value": "Document page 17 (page 16 in 0-indexed system)" } ] @@ -3385,11 +3385,11 @@ "content": "There is a significant demand for personal autonomy support systems.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Introduction / Societal context" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3400,11 +3400,11 @@ "content": "Chaaraoui et al. 
(2014) developed a vision home system that allows elderly and disabled individuals to live independently at home while receiving care and safety services via vision-based monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature reference / Vision-based monitoring" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3415,11 +3415,11 @@ "content": "The system's specification is also offered, as well as innovative contributions to human behavior analysis and privacy protection.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature reference / System specifications" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3430,11 +3430,11 @@ "content": "The behavior recognition method's experimental findings indicate excellent performance, as well as support for multi-view situations and real-time execution, both of which are necessary to provide the suggested services.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature results / Behavior recognition" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3445,11 +3445,11 @@ "content": "The study also discusses computer vision technologies, neural network techniques, and artificial intelligence methodologies in connection with the topic of monitoring.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature discussion / Technologies used" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3460,11 +3460,11 @@ "content": "As a result, a graphical representation of the structure of an intelligent system for support and decision-making, as well as a block diagram of a stationary monitoring complex based on video surveillance cameras, was provided.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature visuals / System diagrams" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3475,11 +3475,11 @@ "content": "The practicality and prospects of using such complexes for the monitoring needs of engineering infrastructure facilities, as well as the impact of the development of technologies used in them on global advancement in general, were discussed.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Literature discussion / Practicality & impact" }, { - "attribute": "source", + "name": "source", "value": "Chaaraoui et al., 2014" } ] @@ -3490,11 +3490,11 @@ "content": "Image processing is a way to convert an image to a digital form and perform certain functions on it, in order to get an enhanced image or extract other useful information from it.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing fundamentals" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3505,11 +3505,11 @@ "content": "It is a type of signal processing in which the input is an image, such as a video frame or photograph, and the output can be an image or features associated with that image.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing fundamentals" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3520,11 +3520,11 @@ "content": "Usually, the Image Processing system includes treating images as two-dimensional signals while applying already established signal processing methods to them.", 
"attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing fundamentals" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3535,11 +3535,11 @@ "content": "It is one of the fastest growing technologies today, with its use in various business sectors.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing market/industry trend" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3550,11 +3550,11 @@ "content": "Graphic Design forms the core of the research space within the engineering and computer science industry as well.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Industry role / Graphic Design" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3565,11 +3565,11 @@ "content": "Image processing basically involves the following three steps, which are: Importing an image with an optical scanner or digital photography; Analysis and image management including data compression; and image enhancement and visual detection patterns such as satellite imagery.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing workflow" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3580,11 +3580,11 @@ "content": "It produces the final stage where the result can be changed to an image or report based on image analysis.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Image processing workflow" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3595,11 +3595,11 @@ "content": "The following libraries are involved in performing Image processing in python: Scikit-image; OpenCV; Mahotas; SimplelTK; SciPy; Pillow; Matplotlib.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Python libraries for image processing" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3610,11 +3610,11 @@ "content": "OpenCV (Open Source Computer 17 ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "OpenCV / Software library" }, { - "attribute": "source", + "name": "source", "value": "Document page 17" } ] @@ -3625,19 +3625,19 @@ "content": "OpenCV-Python is an OpenCV Python API. OpenCV-Python is not only running, because the background has a code written in C/C++, but it is also easy to extract and distribute (due to Python folding in the front). 
This makes it a good choice for building more robust computer vision programs.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Vision/ OpenCV and monitoring (OpenCV-Python description)" }, { - "attribute": "source", + "name": "source", "value": "OpenCV-Python description on page" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3648,19 +3648,19 @@ "content": "Every homeowner should have a monitoring system in place for real-time monitoring of in-house activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Vision/ Applications (home monitoring advocacy)" }, { - "attribute": "source", + "name": "source", "value": "Text on page" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "positive" } ] @@ -3671,19 +3671,19 @@ "content": "Surya and Ningsih (2019) developed a system that includes a Raspberry Pi as the computing core and an Android-based smartphone for monitoring. This system consists of a servo motor that can move the camera horizontally and vertically, a user-controllable video recording system, and a motion detection system that can alert users.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Vision/ Monitoring System" }, { - "attribute": "source", + "name": "source", "value": "Surya & Ningsih (2019)" }, { - "attribute": "date", + "name": "date", "value": "2019" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3694,19 +3694,19 @@ "content": "According to test results with a maximum of two clients in one network, the system can take an average of 10.7 frames per second and has a camera movement response of less than one second.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3 Vision/ Monitoring System (test results)" }, { - "attribute": "source", + "name": "source", "value": "Surya & Ningsih (2019)" }, { - "attribute": "date", + "name": "date", "value": "2019" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3717,19 +3717,19 @@ "content": "Mohana and Aradhya (2019) discussed how efficient data processing has impacted performance benchmarks in terms of speed and accuracy.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.3/2.4 AI impact on performance (citation)" }, { - "attribute": "source", + "name": "source", "value": "Mohana & Aradhya (2019)" }, { - "attribute": "date", + "name": "date", "value": "2019" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3740,19 +3740,19 @@ "content": "Computer vision (CV) and artificial intelligence (AI) have also improved data visualization, processing, and analysis. 
Major tasks such as object detection and tracking for traffic vigilance systems have been made possible by this technology.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ CV impact" }, { - "attribute": "source", + "name": "source", "value": "General statement on page" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3763,23 +3763,23 @@ "content": "A convolutional neural network (CNN) model is constructed for single object detection on an urban vehicle dataset, and YOLOv3 is used for multiple object detection on the KITTI and COCO datasets.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ CNN and YOLO references" }, { - "attribute": "datasets", + "name": "datasets", "value": "KITTI, COCO" }, { - "attribute": "methods", + "name": "methods", "value": "CNN (single object), YOLOv3 (multi-object)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" }, { - "attribute": "source", + "name": "source", "value": "Page text" } ] @@ -3790,19 +3790,19 @@ "content": "Convolutional Neural Networks and deep learning artificial intelligence technologies are quickly evolving, primarily because AI processes large amounts of data much faster and makes predictions more accurately than humanly possible.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ Deep learning evolution (opinion)" }, { - "attribute": "source", + "name": "source", "value": "Page text" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "positive" } ] @@ -3813,19 +3813,19 @@ "content": "While the huge volume of data being created on a daily basis would bury a human researcher, AI applications use machine learning to take the data and quickly turn it into actionable information.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ ML usage" }, { - "attribute": "source", + "name": "source", "value": "Page text" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3836,19 +3836,19 @@ "content": "As of this writing, the primary disadvantage of using AI was the expense of processing the large amounts of data that AI programming requires (Indolia, et al., 2018).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ limitations" }, { - "attribute": "source", + "name": "source", "value": "Indolia et al., 2018" }, { - "attribute": "date", + "name": "date", "value": "2018" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3859,19 +3859,19 @@ "content": "In the current age of the fourth industrial revolution, the digital world has a wealth of data, such as Internet of Things (IoT) data, cybersecurity data, mobile data, business data, social media data, and health data.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4 AI/ data types" }, { - "attribute": "source", + "name": "source", "value": "Page text" }, { - "attribute": "date", + "name": "date", "value": "n/a" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -3882,19 +3882,19 @@ "content": "analyze these data and develop the corresponding smart and automated applications, the knowledge of artificial intelligence (AI), particularly, 
machine learning (ML) is the key (Sarker, 2021).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Sarker 2021" }, { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "date", + "name": "date", "value": "2021" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -3905,15 +3905,15 @@ "content": "Deep learning, as part of a broader family of machine learning methods, intelligently analyzes the data on a large scale with several application capabilities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "General" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -3924,15 +3924,15 @@ "content": "Nicosia (2019) developed a support vector machine and decision tree algorithm; datasets from Udacity were deployed with the Python programming language to create and train the algorithm for vehicle detection and tracking using machine learning.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Nicosia 2019" }, { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -3943,15 +3943,15 @@ "content": "This machine is capable of monitoring and reporting multi-function activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Nicosia 2019" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.70" } ] @@ -3962,15 +3962,15 @@ "content": "A conceptual model for successful implementation of machine learning in organizations was presented in this article.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "(article)" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.75" } ] @@ -3981,15 +3981,15 @@ "content": "Najafabadi et al. (2015) investigated how Deep Learning is being utilized for addressing some important problems in Big Data Analytics, including extracting complex patterns from massive volumes of data, semantic indexing, data tagging, fast information retrieval, and simplifying discriminative tasks.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4000,15 +4000,15 @@ "content": "Big Data has become important as many organizations both public and private have been collecting massive amounts of domain-specific information, which can contain useful information about problems such as national intelligence, cyber security, car detection, fraud detection, marketing, and medical informatics.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4019,15 +4019,15 @@ "content": "Deep Learning algorithms extract high-level, complex abstractions as data representations through a hierarchical learning process, which has made it a valuable tool for Big Data Analytics where raw data 
is largely unlabeled and uncategorized.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4038,15 +4038,15 @@ "content": "Machine learning techniques are divided into three categories, which are:\ni. Supervised Machine Learning ...\nii. Unsupervised Machine Learning ...\niii. Reinforcement Learning ", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4057,15 +4057,15 @@ "content": "The most widely used machine learning algorithms are supervised machine learning algorithms. A data scientist serves as a guide in this model, instructing the algorithm on what conclusions it should reach. In supervised learning, the algorithm is trained by a dataset that is already labeled and has a preset output, similar to how a youngster learns to identify fruits by remembering them in a picture book. Algorithms like linear and logistic regression, multiclass classification, and support vector machines are examples of supervised machine learning.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.90" } ] @@ -4076,15 +4076,15 @@ "content": "Unsupervised machine learning entails learning from data that lacks labels or a clearly specified outcome.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4095,15 +4095,15 @@ "content": "Data sets aren't labeled but, after performing an action or several actions, the AI system is given feedback. Choosing an appropriate learning technique requires considering the structure and volume of the data, as well as the intended use case, which determines whether a supervised or an unsupervised technique is used.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "Najafabadi 2015" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4114,15 +4114,15 @@ "content": "enabling a wide range of corporate goals and use cases, including: customer lifetime value, anomaly detection, dynamic pricing, predictive maintenance, image classification, and recommendation engines.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Machine Learning" }, { - "attribute": "source", + "name": "source", "value": "page 20 excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4133,15 +4133,15 @@ "content": "Deep learning, as a subset of machine learning, is essentially a neural network with three or more layers. These neural networks aim to imitate the activity of the human brain by allowing it to learn from enormous amounts of data, albeit they fall far short of its capabilities. 
While a single-layer neural network may produce approximate predictions, additional hidden layers can help optimize and improve accuracy.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "2.4.2 Deep Learning section" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.85" } ] @@ -4152,15 +4152,15 @@ "content": "Many artificial intelligence (AI) apps and services rely on deep learning to improve automation by executing analytical and physical activities without the need for human participation. Everyday products and services (such as digital assistants, voice-enabled TV remotes, self-driving cars, and credit card fraud detection) as well as upcoming innovations use deep learning technology.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "2.4.2 Deep Learning section" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4171,15 +4171,15 @@ "content": "Hordri et al. (2017) reported that deep learning was suitable for better analysis, and it could learn enormous amounts of unlabeled data in various fields.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Hordri 2017" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4190,15 +4190,15 @@ "content": "Deep learning finds its application in automatic speech recognition, image recognition, natural language processing, drug discovery and toxicology, customer relationship management, recommendation systems and bioinformatics.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Hordri 2017" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4209,15 +4209,15 @@ "content": "Deep Learning has generated complexities in algorithms, and researchers and users have raised concerns regarding the usability and adoptability of Deep Learning systems (Kaluarachchi, et al., 2021).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Kaluarachchi 2021" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4228,15 +4228,15 @@ "content": "These concerns, coupled with the increasing human-artificial intelligence interactions, have created the emerging field of Human-Centered Machine Learning (HCML).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Kaluarachchi 2021" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.75" } ] @@ -4247,15 +4247,15 @@ "content": "A collaboration with field domain experts was undertaken to develop a working definition for HCML. 
The topology of the HCML landscape was analyzed through the identification of research gaps, highlighting conflicting interpretations, addressing current challenges, and presenting future HCML research opportunities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Kaluarachchi 2021" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.75" } ] @@ -4266,15 +4266,15 @@ "content": "A cloud-based system comprises collections of network-connected, distributed computing and storage servers that provide virtual environments for different system operations, application", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.5 Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Page 19 excerpt" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.80" } ] @@ -4285,27 +4285,27 @@ "content": "enabling a wide range of corporate goals and use cases, including: customer lifetime value, anomaly detection, dynamic pricing, predictive maintenance, image classification, and recommendation engines.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.1 Deep Learning Use Cases" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "Deep Learning Use Cases" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4316,27 +4316,27 @@ "content": "Deep learning, as a subset of machine learning, is essentially a neural network with three or more layers. These neural networks aim to imitate the activity of the human brain by allowing it to learn from enormous amounts of data, albeit they fall far short of its capabilities. 
While single-layer neural network may produce approximate predictions, additional hidden layers could help to optimize and improve for accuracy.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "Hordri et al., 2017" }, { - "attribute": "topic", + "name": "topic", "value": "Deep Learning Theory" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4347,27 +4347,27 @@ "content": "Many artificial intelligence (AI) apps and services rely on deep learning to improve automation by executing analytical and physical activities without the need for human participation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "Kaluarachchi et al., 2021" }, { - "attribute": "topic", + "name": "topic", "value": "Automation via Deep Learning" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4378,27 +4378,27 @@ "content": "Everyday products and services (such as digital assistants, voice-enabled TV remotes, such as self-driving cars, and credit card fraud detection) as well as upcoming innovations use deep learning technology.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "Applications/Examples of DL" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4409,27 +4409,27 @@ "content": "The deep learning algorithm then changes and fits itself for accuracy via gradient descent and backpropagation,", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "DL Algorithms" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4440,27 +4440,27 @@ "content": "Hordri et al., (2017), reported that deep learning was suitable for better analysis, and it could learn enormous amounts unlabeled data in various field.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "2017" }, { - "attribute": "author", + "name": "author", "value": "Hordri et al." 
}, { - "attribute": "topic", + "name": "topic", "value": "Deep Learning Applications/Analysis" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4471,27 +4471,27 @@ "content": "Deep Learning find s application in automatic speech recognition, image recognition, natural language processing, drug discovery and toxicology, customer relationship management, recommendation system and bioinformatics.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "DL Applications" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4502,27 +4502,27 @@ "content": "Deep Learning has generated complexities in algorithms, and researchers and users have raised concerns regarding the usability and adoptability of Deep Learning systems (Kaluarachchi, et al., 2021).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "2021" }, { - "attribute": "author", + "name": "author", "value": "Kaluarachchi et al." }, { - "attribute": "topic", + "name": "topic", "value": "HCML Concerns" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4533,27 +4533,27 @@ "content": "These concerns, coupled with the increasing human-artificial intelligence (AI) interactions, has created the emerging field that was Human-Centered Machine Learning (HCML). 
Collaboration with field domain experts to develop a working definition for HCML was made.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning/HCML" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "HCML Definition" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4564,23 +4564,23 @@ "content": "The topology of the HCML landscape by research gaps identification, were analyzed, highlighting conflicting interpretations, addressing current challenges, and presenting future HCML research opportunities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.4.2 Deep Learning/HCML" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "HCML Landscape" } ] @@ -4591,27 +4591,27 @@ "content": "2.5 Cloud Based Architecture A cloud based system comprises of collections of network-connected distributed computing and storage servers that provides virtual environments for different system operations, application", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.5 Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Page 20" }, { - "attribute": "date", + "name": "date", "value": "N/A" }, { - "attribute": "author", + "name": "author", "value": "N/A" }, { - "attribute": "topic", + "name": "topic", "value": "Cloud Architecture" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4622,19 +4622,19 @@ "content": "A cloud based system comprises of collections of network-connected distributed computing and storage servers that provides virtual environments for different system operations, application containers, and computing services (Roehl, 2019).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" }, { - "attribute": "topic", + "name": "topic", "value": "Cloud architecture basics" } ] @@ -4645,15 +4645,15 @@ "content": "Software system functions on cloud platform by deploying it on the cloud platform's virtual machines, albeit the benefit is limited to eliminating the requirement for actual hardware acquisition and maintenance.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4664,15 +4664,15 @@ "content": "Within the cloud servers, users must still set up the appropriate computing environments, such as dependency libraries, networking, and storage system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4683,15 +4683,15 @@ "content": "Some of these responsibilities are alleviated by application containers, which give interfaces to computing environments that users 
do not have to set up separately for each deployment of the system software on a cloud platform.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4702,15 +4702,15 @@ "content": "The use of cloud-based platforms can be classified into Infrastructure, Platform, Function, and software services:", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4721,15 +4721,15 @@ "content": "i. Infrastructure as a Service (IaaS) This refers to the ability of a cloud platform to act as virtual machines for software applications that run on local servers. Amazon EC2 (Elastic Compute Cloud) is such an example, where users deploy software applications by launching instances of virtual servers, uploading the applications, and executing them.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4740,15 +4740,15 @@ "content": "iv. Software as a Service (SaaS) 22", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4759,15 +4759,15 @@ "content": "Amazon EC2 (Elastic Compute Cloud) is such an example, where users deploy software applications by launching instances of virtual servers, uploading the applications, and executing them. The virtual machines are distributed and are used as stable storage such for platforms such Amazon S3 (Simple Storage Service) buckets.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4778,15 +4778,15 @@ "content": "ii. Platform as a Service (PaaS) This manage the computation environment for specialized applications such as Web servlets. Google\\'s app engine is such an example, where users develop software programs using app engine development tools and deploy the program to the app engine for execution.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4797,15 +4797,15 @@ "content": "In this system, users need to develop complete software programs as in IaaS except that users rely on the API of the platform for runtime support, networking, and storage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4816,15 +4816,15 @@ "content": "iii. 
Function as a Service (FaaS) AWS Lambda and Azure Functions allow users to implement lightweight applications as stateless functions, which can be used for high throughput processing such as data transformation, altering, and event detection.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4835,15 +4835,15 @@ "content": "FaaS functions connect to storage and network through APIs.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4854,15 +4854,15 @@ "content": "FaaS functions are considered serverless since there are no dedicated servers allocated for running the functions and the cost is based on calls to the functions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud Based Architecture" }, { - "attribute": "source", + "name": "source", "value": "Roehl 2019" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -4873,15 +4873,15 @@ "content": "Infrastructure as a Service (IaaS) This refers to the ability of a cloud platform to act as virtual machines for software applications that run on local servers. Amazon EC2 (Elastic Compute Cloud) is such an example, where users deploy software applications by launching instances of virtual servers, uploading the applications, and executing them.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud computing models (IaaS)" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4892,15 +4892,15 @@ "content": "Platform as a Service (PaaS) This manages the computation environment for specialized applications such as Web servlets. Google's App Engine is such an example, where users develop software programs using App Engine development tools and deploy the programs to the App Engine for execution.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud computing models (PaaS)" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4911,15 +4911,15 @@ "content": "iii. Function as a Service (FaaS) AWS Lambda and Azure Functions allow users to implement lightweight applications as stateless functions, which can be used for high throughput processing such as data transformation, altering, and event detection. FaaS functions connect to storage and network through APIs.
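To make the FaaS description concrete, a minimal AWS Lambda handler in Python; the handler signature follows AWS's documented convention, while the event fields ("reading", "threshold") are assumptions for illustration:

```python
import json

def lambda_handler(event, context):
    """Stateless FaaS function: no dedicated server, billed per invocation."""
    reading = float(event.get("reading", 0.0))      # assumed payload field
    threshold = float(event.get("threshold", 1.0))  # assumed payload field
    return {
        "statusCode": 200,
        "body": json.dumps({"event_detected": reading > threshold}),
    }
```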
FaaS functions are considered serverless since there are no dedicated servers allocated for running the functions and the cost is based on calls to the functions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud computing models (FaaS)" }, { - "attribute": "source", + "name": "source", "value": "Page 21-22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4930,15 +4930,15 @@ "content": "This refers to software applications hosted by cloud servers such as Dropbox, Microsoft Office 365, and Google Apps that deliver functionalities over the network.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud computing models (SaaS)" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4949,15 +4949,15 @@ "content": "For data transformation, modification, and event monitoring, this system employs Function as a Service to provide an event-based architecture. In view of the flexibility and scalability of its resources, it is used for customizable IoT applications handled by the IoT Hub and Stream Analytics services.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Cloud computing models (FaaS/SaaS) and IoT integration" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -4968,15 +4968,15 @@ "content": "2.6 Summary of the Reviewed Literature In general, several tools were discovered for the development of an artificial intelligence-enabled multi-function activity monitoring and reporting system, which include computer vision, convolutional neural network (CNN), and a web app as user interface for smart monitoring and reporting.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "2.6 Summary of Reviewed Literature" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -4987,15 +4987,15 @@ "content": "Convolutional neural network has the capability for image processing, image recognition, object detection & segmentation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "CNN capabilities" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5006,15 +5006,15 @@ "content": "The CNN uses a hierarchical model that builds a network, similar to a funnel, and then outputs a fully-connected layer in which all neurons are connected to each other and the output is processed.
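A sketch of the funnel-shaped CNN described above: stacked convolution and pooling layers progressively narrow the representation before a fully-connected layer processes the output. Layer sizes are illustrative, not the project's model:

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(300, 300, 3)),             # RGB frame (illustrative size)
    tf.keras.layers.Conv2D(16, 3, activation="relu"),
    tf.keras.layers.MaxPooling2D(),                  # the funnel narrows
    tf.keras.layers.Conv2D(32, 3, activation="relu"),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),    # fully-connected layer
    tf.keras.layers.Dense(3, activation="softmax"),  # e.g. three activity classes
])
model.summary()  # shows the spatial dimensions shrinking layer by layer
```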
Hence, CNN plays a major role in the development of this system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "CNN architecture and role" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5025,15 +5025,15 @@ "content": "As a branch of artificial intelligence (AI), computer vision allows computers and systems to extract useful information from digital photos, videos, and other visual inputs, and to conduct actions or make suggestions based on that data.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Computer vision overview" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5044,15 +5044,15 @@ "content": "With cameras, data, and algorithms, computer vision trains this system to do these tracking, identification, and data logging duties in much less time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Applications of computer vision" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5063,15 +5063,15 @@ "content": "In addition, a system trained to inspect items or monitor a manufacturing asset can evaluate thousands of products or processes every minute, detecting faults or abnormalities that are otherwise undetectable.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Industrial inspection capabilities" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5082,15 +5082,15 @@ "content": "In this system OCR is used to automate data extraction from a scanned document or image file and then transform the text to a machine-readable format for data processing. This saves time and resources that would be required to manage unsearchable data otherwise.
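One way to reproduce the OCR step described above; the section does not name the OCR library, so Tesseract via pytesseract is an assumption, and the file name is a placeholder:

```python
from PIL import Image
import pytesseract  # Tesseract bindings; requires a local Tesseract install

# Turn a scanned document into machine-readable text for data processing.
image = Image.open("scanned_report.png")   # placeholder file name
text = pytesseract.image_to_string(image)
print(text)  # searchable text, ready for downstream processing
```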
OCR eliminates human data entry, saves resources by processing more data faster and with fewer resources, lowers errors by providing a 98 to 99 percent accuracy range, and boosts productivity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "OCR and data extraction" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5101,15 +5101,15 @@ "content": "IoT technology enhances GPS devices to transmit data remotely and connect to other systems and sensors, for the sole purpose of collecting and transmitting comprehensive ongoing activity identification.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "IoT integration with GPS and sensors" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5120,15 +5120,15 @@ "content": "Although individuals can engage with the devices to set them up,", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Page 22 continuity notes" }, { - "attribute": "source", + "name": "source", "value": "Page 22" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -5139,23 +5139,23 @@ "content": "give them instructions, or retrieve data, the gadgets do the majority of the work without human participation.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 23" }, { - "attribute": "page_number", + "name": "page_number", "value": "23" }, { - "attribute": "topic", + "name": "topic", "value": "automation/gadgets" }, { - "attribute": "certainty", + "name": "certainty", "value": "high" }, { - "attribute": "tone", + "name": "tone", "value": "neutral" } ] @@ -5166,23 +5166,23 @@ "content": "give them instructions, or retrieve data, the gadgets do the majority of the work without human participation.", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "24" }, { - "attribute": "context", + "name": "context", "value": "automation description" }, { - "attribute": "tone", + "name": "tone", "value": "neutral" }, { - "attribute": "source", + "name": "source", "value": "document page 24" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5193,23 +5193,23 @@ "content": "give them instructions, or retrieve data, the gadgets do the majority of the work without human participation.", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "24" }, { - "attribute": "context", + "name": "context", "value": "human-automation workflow" }, { - "attribute": "tone", + "name": "tone", "value": "neutral" }, { - "attribute": "source", + "name": "source", "value": "document page 24" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -5220,19 +5220,19 @@ "content": "Artificial intelligence in activity detection and monitoring systems has application in security, surveillance, health, and a variety of sectors, where activities are monitored leveraging RCNN, pose estimation, and internet of things (IoT) processes as its approach for developing an AI-based multi-function activity monitoring and reporting system with a mobile application to serve as a gateway for interfacing with the system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.1 Overview" }, { - "attribute": "chapter", + "name": "chapter", "value": "CHAPTER THREE" 
}, { - "attribute": "page", + "name": "page", "value": "25" }, { - "attribute": "topic", + "name": "topic", "value": "AI-based activity monitoring applications" } ] @@ -5243,11 +5243,11 @@ "content": "where activities are monitored leveraging RCNN, pose estimation, and internet of things (IoT) processes as its approach for developing an AI-based multi-function activity monitoring and reporting system with a mobile application to serve as a gateway for interfacing with the system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.1 Overview" }, { - "attribute": "method", + "name": "method", "value": "RCNN, pose estimation, IoT" } ] @@ -5258,11 +5258,11 @@ "content": "... AI-based multi-function activity monitoring and reporting system with a mobile application to serve as a gateway for interfacing with the system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.1 Overview" }, { - "attribute": "feature", + "name": "feature", "value": "mobile gateway" } ] @@ -5273,11 +5273,11 @@ "content": "OpenCV (Open-Source Computer Vision Library) has rich plugins for computer vision, machine learning, and image processing, and it presently plays a key role in real-time operation, which is crucial to this AI based multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.1 Software Materials" }, { - "attribute": "tool", + "name": "tool", "value": "OpenCV" } ] @@ -5288,11 +5288,11 @@ "content": "It recognizes items, people, identify objects, classify human actions, track camera movements, track moving objects, extract 3D models of objects, produce 3D point clouds from stereo cameras, stitch images together to produce a high resolution image of an entire scene, facial movements, detect scenery, and establish markers to overlay it with a 3D model.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.1 Software Materials" }, { - "attribute": "tool", + "name": "tool", "value": "OpenCV" } ] @@ -5303,11 +5303,11 @@ "content": "TensorFlow is adopted in this project for use because it has a symbolic math toolkit that employs dataflow and differentiable programming to handle a variety of tasks related to deep neural network training and inference. It’s used for image classification in this research work.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.1 Software Materials" }, { - "attribute": "tool", + "name": "tool", "value": "TensorFlow" } ] @@ -5318,19 +5318,19 @@ "content": "The Raspberry Pi 3 has a low-cost, credit-card-sized computer that connects to a computer display or TV and operates with a regular keyboard and mouse. It is a powerful tiny computer that allows individuals of all ages to experiment with computing and learn to write in languages such as Scratch and Python.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "i. Raspberry Pi 3" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.82" } ] @@ -5341,23 +5341,23 @@ "content": "It also has GPIO (general purpose input/output) pins for controlling electrical components for physical computing and exploring the Internet of Things (IoT) as in fig. 
3.1.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "i. Raspberry Pi 3" }, { - "attribute": "feature", + "name": "feature", "value": "GPIO pins" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.79" } ] @@ -5368,19 +5368,19 @@ "content": "Figure 3.1:Raspberry Pi 3 Controller", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "i. Raspberry Pi 3" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.75" } ] @@ -5391,19 +5391,19 @@ "content": "ii. Raspberry Pi Camera Module V2 This 8MP camera module connects straight to the Raspberry Pi to collect video feed.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.78" } ] @@ -5414,19 +5414,19 @@ "content": "It has supports of pixel static images, , and video.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.65" } ] @@ -5437,19 +5437,19 @@ "content": "It is 3280 × 2464 1080 𝑝 30720 𝑝 60 640 ×480 𝑝 90adopted for this design because this is the plug-and-play version of the Raspbian operating system, making it ideal for video recording, motion detection, and security applications.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.42" } ] @@ -5460,19 +5460,19 @@ "content": "adopted for this design because this is the plug-and-play version of the Raspbian operating system, making it ideal for video recording, motion detection, and security applications.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.38" } ] @@ -5483,19 +5483,19 @@ "content": "Connect the provided ribbon cable to your Raspberry Pi's CSI (Camera Serial Interface) connector, and you're ready to go!", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. 
Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.77" } ] @@ -5506,19 +5506,19 @@ "content": "The board itself is small, measuring around and 25 𝑚𝑚 × 23 𝑚𝑚 × 9 𝑚𝑚weighing little over 3g, making it ideal for mobile or other applications where size and weight", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.68" } ] @@ -5529,19 +5529,19 @@ "content": "Figure 3.1:Raspberry Pi 3 Controller", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "i. Raspberry Pi 3" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.70" } ] @@ -5552,19 +5552,19 @@ "content": "It is 3280 × 2464 1080 𝑝 30720 𝑝 60 640 ×480 𝑝 90adopted for this design because this is the plug-and-play version of the Raspbian operating system, making it ideal for video recording, motion detection, and security applications.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.50" } ] @@ -5575,19 +5575,19 @@ "content": "The board itself is small, measuring around and 25 𝑚𝑚 × 23 𝑚𝑚 × 9 𝑚𝑚weighing little over 3g, making it ideal for mobile or other applications where size and weight", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.2.2 Hardware Materials" }, { - "attribute": "subsection", + "name": "subsection", "value": "ii. Raspberry Pi Camera Module V2" }, { - "attribute": "page", + "name": "page", "value": "26" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.48" } ] @@ -5598,15 +5598,15 @@ "content": "are critical. The sensor features an 8-megapixel native resolution and a fixed-focus lens as in fig.3.2.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27; Figure 3.2; Hardware description" }, { - "attribute": "section", + "name": "section", "value": "Camera hardware (Raspberry Pi Camera Module V2)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5617,15 +5617,15 @@ "content": "The sensor features an 8-megapixel native resolution and a fixed-focus lens as in fig.3.2. Figure. 3.2: Raspberry Pi Camera Module V2", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27; Figure 3.2" }, { - "attribute": "section", + "name": "section", "value": "Camera hardware (Raspberry Pi Camera Module V2)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5636,15 +5636,15 @@ "content": "Figure. 
3.2: Raspberry Pi Camera Module V2 3.3 Method The block diagram in Figure 3.3 shows AI based multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27; Figure 3.3" }, { - "attribute": "section", + "name": "section", "value": "AI-based system (Figure 3.3)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5655,15 +5655,15 @@ "content": "i. Video Processing This entails using image processing techniques to process a video feed. This takes a succession of photographs in a frame, starting with image acquisition, which entails collecting the images from a source, in our case the raspberry pi camera module v2. The next step is to convert the image to grayscale so that it can be processed.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27" }, { - "attribute": "section", + "name": "section", "value": "Video Processing" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5674,15 +5674,15 @@ "content": "ii. Load Model Single-Shot multibox Detection (SSD) mobilenet network was utilized for this project. This was used to perform motion detection. The process involves loading the model using Keras/OpenCV to make it accessible for use.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27" }, { - "attribute": "section", + "name": "section", "value": "Model (SSD Mobilenet) for motion detection" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5693,15 +5693,15 @@ "content": "3.3 Method The block diagram in Figure 3.3 shows AI based multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Document page 27" }, { - "attribute": "section", + "name": "section", "value": "Overall system description" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -5712,11 +5712,11 @@ "content": "3.3.1 Design of a multi-function activity monitoring and reporting system To design a multi-function activity monitoring and reporting system, Raspberry Pi 3 controller and Pi Camera module was adapted. Raspbian Operating system, being the most populous operating system for the raspberry pi controller was installed as the base operating system to enable swift operation and integration of the controller and the Pi camera module.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1" }, { - "attribute": "source", + "name": "source", "value": "Page 28" } ] @@ -5727,7 +5727,7 @@ "content": "This entails using image processing techniques to process a video feed. This takes a succession of photographs in a frame, starting with image acquisition, which entails collecting the images from a source, in our case the raspberry pi camera module v2. The next step is to convert the image to grayscale so that it can be processed.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5738,11 +5738,11 @@ "content": "Load Model Single-Shot multibox Detection (SSD) mobilenet network was utilized for this project. This was used to perform motion detection. 
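A sketch of the Load Model step using OpenCV's dnn module, restricted to person detections as this section describes; the prototxt/caffemodel file names and the person class index (15 in the common 20-class MobileNetSSD) are assumptions:

```python
import cv2

# Load the MobilenetSSD detector once, then reuse it per frame.
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",   # assumed file names
                               "MobileNetSSD_deploy.caffemodel")
PERSON_CLASS_ID = 15  # person index in the common 20-class MobileNetSSD

def detect_people(frame, conf_threshold=0.5):
    h, w = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()            # shape (1, 1, N, 7)
    people = []
    for i in range(detections.shape[2]):
        class_id = int(detections[0, 0, i, 1])
        confidence = float(detections[0, 0, i, 2])
        if class_id == PERSON_CLASS_ID and confidence > conf_threshold:
            box = (detections[0, 0, i, 3:7] * [w, h, w, h]).astype(int)
            people.append((confidence, box))  # rectangle around the extent
    return people
```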
The process involves loading the model using Keras/OpenCV to make it accessible for use.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" }, { - "attribute": "method", + "name": "method", "value": "SSD MobileNet" } ] @@ -5753,7 +5753,7 @@ "content": "iii. Object Detection and Recognition To detect multiple activities, object detection and recognition techniques is adapted. This uses systems like multibox detection to identify objects in the frame which draws a rectangle around its extent. This combines two tasks which are object localization and image classification. The initial is the process of identifying the object in the frame and drawing a frame around it, whereas the latter is to locate the presence of the located object and its types or classes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5764,7 +5764,7 @@ "content": "iv. Pose Estimation A custom pre-trained mobilenet model was adapted for this section. This is a technique for predicting and tracking the location of a person by looking at the combination of the pose and the orientation of the person. This is used for recognizing postural activities like sitting and standing in this research.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5775,7 +5775,7 @@ "content": "v. Database and Storage In this section, data is collected, and reports are stored in the cloud using Google Firebase services. This keeps the videos, data and perform analysis on them. It is used to send notifications and request to the mobile interface and the A.I backend.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5786,7 +5786,7 @@ "content": "vi. Mobile Interface A beautiful interface rich in User Experience (UX) was built for monitoring and managing activities. The application runs the multi-function activity monitoring and reporting system and give useful analytical information. It also allows the user to set activities to be tracked, the time and the report channel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5797,7 +5797,7 @@ "content": "3.3.1 Design of a multi-function activity monitoring and reporting system To design a multi-function activity monitoring and reporting system, Raspberry Pi 3 controller and Pi Camera module was adapted. Raspbian Operating system, being the most populous operating system for the raspberry pi controller was installed as the base operating system to enable swift operation and integration of the controller and the Pi camera module. This section gives detailed information about data acquisition and preparation, video feed acquisition, activity 29", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1" } ] @@ -5808,7 +5808,7 @@ "content": "This section gives detailed information about data acquisition and preparation, video feed acquisition, activity", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3" } ] @@ -5819,23 +5819,23 @@ "content": "3.3.1 Design of a multi-function activity monitoring and reporting system To design a multi-function activity monitoring and reporting system, Raspberry Pi 3 controller and Pi Camera module was adapted. 
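For the pose estimation step above (recognizing postural activities like sitting and standing), a hedged sketch of applying a custom pre-trained MobileNet classifier; the model file name and input size are assumptions:

```python
import numpy as np
import tensorflow as tf

POSTURES = ["sitting", "standing", "laying"]  # the three classes used in this project
model = tf.keras.models.load_model("custom_mobilenet_pose.h5")  # assumed file name

def classify_posture(frame: np.ndarray) -> str:
    """Classify one RGB frame into a posture label."""
    x = tf.image.resize(frame, (224, 224)) / 255.0   # MobileNet's usual input size
    probs = model.predict(x[tf.newaxis, ...], verbose=0)[0]
    return POSTURES[int(np.argmax(probs))]
```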
Raspbian Operating system, being the most popular operating system for the raspberry pi controller was installed as the base operating system to enable swift operation and integration of the controller and the Pi camera module.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1 Design of a multi-function activity monitoring and reporting system" }, { - "attribute": "page", + "name": "page", "value": "29" }, { - "attribute": "device", + "name": "device", "value": "Raspberry Pi 3, Pi Camera" }, { - "attribute": "os", + "name": "os", "value": "Raspbian" }, { - "attribute": "source", + "name": "source", "value": "Page 29 text" } ] @@ -5846,19 +5846,19 @@ "content": "3.3.1.1 Data Acquisition and Preparation Data acquisition is the first step in the training process for every machine learning model. Even though the procedure may be automated in many ways, in this training scenario, quality photos were acquired, edited to remove noisy data, and augmented by manually collecting the data and preparing it. For training and testing, more than a thousand samples were gathered in this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.1 Data Acquisition and Preparation" }, { - "attribute": "page", + "name": "page", "value": "29" }, { - "attribute": "notes", + "name": "notes", "value": ">1000 samples" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5869,15 +5869,15 @@ "content": "3.3.1.2 Video Feed Acquisition This is the process of obtaining a video feed from the Pi Camera. This is the first step and one of the most crucial steps in any video-based research. In this project work, video feeds are acquired from homes or hospitals depending on where the camera is mounted. The aim in this case is to get the video of the patient or an empty room. This video is processed frame by frame using computer vision techniques to enable analysis and processes to run. Below is a pseudo code of how video feed is acquired in this project. Step 1: START Step 2: Load an Image Step 3: Initialize a function VideoCapture()", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.2 Video Feed Acquisition" }, { - "attribute": "page", + "name": "page", "value": "29" }, { - "attribute": "note", + "name": "note", "value": "Video feed from Pi Camera; Homes/Hospitals; pseudo code provided" } ] @@ -5888,15 +5888,15 @@ "content": "3.3.1.3 Activity detection and recognition Mobilenets are one of the fastest machine learning models which are built, scaled and optimized mainly for low-level devices like the controller. Its design implements lightweight deep neural networks using proven depth-wise separable convolutions. The mobilenetSSD adapted in this project uses the coco dataset which has classes of over 100 objects. This can detect over 100 objects in realtime from our video feed but is limited here to detecting and recognizing humans. The latter, being the custom mobilenet, uses a pre-trained model with only three classes: sitting, standing and laying as in fig.3.6.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.3 Activity detection and recognition" }, { - "attribute": "page", + "name": "page", "value": "29" }, { - "attribute": "note", + "name": "note", "value": "MobileNet/SSD; COCO; 100+ classes; three-class custom model" } ] @@ -5907,19 +5907,19 @@ "content": "Figure 3.4: Circuit design for multi-function activity monitoring and reporting system", "attributes": [ { - "attribute": "figure", + "name": "figure", "value": "Figure 3.4" }, { - "attribute": "section", + "name": "section", "value": "3.3.1.2" }, { - "attribute": "source", + "name": "source", "value": "Page 30" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5930,27 +5930,27 @@ "content": "3.3.1.1 Data Acquisition and Preparation Data acquisition is the first step in the training process for every machine learning model. Even though the procedure may be automated in many ways, in this training scenario, quality photos were acquired, edited to remove noisy data, and augmented by manually collecting the data and preparing it. For training and testing, more than a thousand samples were gathered in this project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.1" }, { - "attribute": "data_volume", + "name": "data_volume", "value": ">1000" }, { - "attribute": "data_cleaning", + "name": "data_cleaning", "value": "edited to remove noisy data" }, { - "attribute": "augmentation", + "name": "augmentation", "value": "manual collection" }, { - "attribute": "source", + "name": "source", "value": "Page 30" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5961,27 +5961,27 @@ "content": "3.3.1.2 Video Feed Acquisition This is the process of obtaining a video feed from the Pi Camera. This is the first step and one of the crucial steps in any video-based research. In this project work, video feeds are acquired from homes or hospitals depending on where the camera is mounted. The aim in this case is to get the video of the patient or an empty room. This video is processed frame by frame using computer vision techniques to enable analysis and processes to run. Below is a pseudo code of how video feed is acquired in this project. Step 1: START Step 2: Load an Image Step 3: Initialize a function VideoCapture()", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.2" }, { - "attribute": "hardware", + "name": "hardware", "value": "Pi Camera" }, { - "attribute": "video_goal", + "name": "video_goal", "value": "video of patient or empty room" }, { - "attribute": "frame_processing", + "name": "frame_processing", "value": "frame-by-frame with computer vision techniques" }, { - "attribute": "source", + "name": "source", "value": "Page 30" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -5992,11 +5992,11 @@ "content": "This is the first step and one of the crucial steps in any video-based research.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.2" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "positive importance" } ] @@ -6007,7 +6007,7 @@ "content": "In this project work, video feeds are acquired from homes or hospitals depending on where the camera is mounted. The aim in this case is to get the video of the patient or an empty room.
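A runnable OpenCV version of the acquisition pseudo code (Steps 1 to 3 above; Steps 4 to 9 continue just below). Device index 0 standing for the Pi Camera is an assumption:

```python
import cv2

cap = cv2.VideoCapture(0)                   # Step 3: initialize VideoCapture()
while True:                                 # Step 4: infinite while loop
    ok, frame = cap.read()                  # Step 5: read each frame
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Step 6: operations (grayscale)
    cv2.imshow("video feed", gray)          # Step 7: display video feed
    if cv2.waitKey(1) & 0xFF == ord("q"):   # Step 8: press "q" to exit
        break
cap.release()                               # Step 9: stop
cv2.destroyAllWindows()
```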
This video is processed frame by frame using computer vision techniques to enable analysis and processes to run.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.2" } ] @@ -6018,11 +6018,11 @@ "content": "Below is a pseudo code of how video feed is acquired in this project. Step 1: START Step 2: Load an Image Step 3: Initialize a function VideoCapture()", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1.2" }, { - "attribute": "pseudo_code", + "name": "pseudo_code", "value": "Step 1 START; Step 2 Load an Image; Step 3 Initialize VideoCapture()" } ] @@ -6033,7 +6033,7 @@ "content": "Figure 3.4: Circuit design for multi-function activity monitoring and reporting system", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.1" } ] @@ -6044,23 +6044,23 @@ "content": "Step 4: Initialize an infinite while loop Step 5: Read each frame from the while loop Step 6: Perform operations and processes Step 7: Display video feed Step 8: Press “q” to exit Step 9: STOP", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Video acquisition algorithm (Steps 4-9)" }, { - "attribute": "step_range", + "name": "step_range", "value": "4-9" }, { - "attribute": "page", + "name": "page", "value": "31" }, { - "attribute": "source", + "name": "source", "value": "Figure 3.5 (flowchart)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6071,19 +6071,19 @@ "content": "The flowchart shown below in figure 3.5 describes the algorithm for video acquisition", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Video acquisition algorithm" }, { - "attribute": "figure", + "name": "figure", "value": "Figure 3.5" }, { - "attribute": "page", + "name": "page", "value": "31" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6094,15 +6094,15 @@ "content": "Figure 3.5: Flowchart for video acquisition", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Figure caption" }, { - "attribute": "figure", + "name": "figure", "value": "Figure 3.5" }, { - "attribute": "page", + "name": "page", "value": "31" } ] @@ -6113,15 +6113,15 @@ "content": "A picture of shape (3, 300, 300) is entered into MobilenetSSD, which produces boxes and scores.", "attributes": [ { - "attribute": "model", + "name": "model", "value": "MobileNet-SSD" }, { - "attribute": "output_shape", + "name": "output_shape", "value": "(3,300,300)" }, { - "attribute": "section", + "name": "section", "value": "Architecture" } ] @@ -6132,15 +6132,15 @@ "content": "The 20 different item categories are represented by confidence levels in Scores, with 0 representing the backdrop.", "attributes": [ { - "attribute": "classes", + "name": "classes", "value": "20" }, { - "attribute": "background_label", + "name": "background_label", "value": "0" }, { - "attribute": "section", + "name": "section", "value": "Architecture" } ] @@ -6151,11 +6151,11 @@ "content": "For each item during training, SSD just requires a ground truth box and an input picture.", "attributes": [ { - "attribute": "training_requirements", + "name": "training_requirements", "value": "ground_truth_box, input_picture" }, { - "attribute": "section", + "name": "section", "value": "Training" } ] @@ -6166,11 +6166,11 @@ "content": "The small set of default boxes were examined (for example, 4) with various aspect ratios at each position in several feature maps with various sizes (for example, 8 × 8 and 4 × 4 in (b) and (c)).", "attributes": [ { - "attribute": "default_boxes_config", + "name": "default_boxes_config", "value": "aspect_ratios across multiple feature maps and sizes" }, { - "attribute": "section", + "name": "section", "value": "Architecture" } ] @@ -6181,11 +6181,11 @@ "content": "The shape offsets and confidences ((c1, c2, …, cp)) for each default box for all object categories were forecast.", "attributes": [ { - "attribute": "prediction_scope", + "name": "prediction_scope", "value": "offsets and confidences for each default box across all categories" }, { - "attribute": "section", + "name": "section", "value": "Architecture" } ] @@ -6196,11 +6196,11 @@ "content": "These default boxes were first matched to the ground truth boxes at training time.", "attributes": [ { - "attribute": "matching_criterion", + "name": "matching_criterion", "value": "first_match_to_ground_truth_at_training_time" }, { - "attribute": "section", + "name": "section", "value": "Training" } ] @@ -6211,11 +6211,11 @@ "content": "As an illustration in Figure 3.7, two default boxes with two persons in the frame are matched; these are considered positives, and the remaining default boxes are considered negatives.", "attributes": [ { - "attribute": "example_description", + "name": "example_description", "value": "two default boxes matched to two persons; positives; others negatives" }, { - "attribute": "section", + "name": "section", "value": "Training" } ] @@ -6226,11 +6226,11 @@ "content": "The model loss is a weighted average of the localization loss and the confidence loss (e.g. Softmax).", "attributes": [ { - "attribute": "loss_function", + "name": "loss_function", "value": "weighted average of localization and confidence (Softmax)" }, { - "attribute": "section", + "name": "section", "value": "Training" } ] @@ -6241,11 +6241,11 @@ "content": "3.3.1.4 Reporting Channels To receive updates on activities detected and recognized, Short Message Service (SMS), Email and In-App notifications are used as channels for receiving information.", "attributes": [ { - "attribute": "reporting_channels", + "name": "reporting_channels", "value": "SMS, Email, In-App" }, { - "attribute": "section", + "name": "section", "value": "3.3.1.4 Reporting Channels" } ] @@ -6256,19 +6256,19 @@ "content": "This uses Twilio Application Program Interface (API) to send SMS from the script that’s being executed on the controller.
Nodemailer and Push notifications were adapted for email and in-app notifications.", "attributes": [ { - "attribute": "sms_via", + "name": "sms_via", "value": "Twilio API" }, { - "attribute": "email_via", + "name": "email_via", "value": "Nodemailer" }, { - "attribute": "in_app_via", + "name": "in_app_via", "value": "Push notifications" }, { - "attribute": "section", + "name": "section", "value": "3.3.1.4 Reporting Channels" } ] @@ -6279,11 +6279,11 @@ "content": "The flowchart shown below in fig.3.8 shows the multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "figures", + "name": "figures", "value": "Figure 3.7 and Figure 3.8 referenced; flowchart shown" }, { - "attribute": "section", + "name": "section", "value": "Figures/Illustrations" } ] @@ -6294,7 +6294,7 @@ "content": "The flowchart shown below in fig.3.8 shows the multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Figures/Illustrations" } ] @@ -6305,11 +6305,11 @@ "content": "A picture of shape (3, 300, 300) is entered into MobilenetSSD, which produces boxes and scores.", "attributes": [ { - "attribute": "model", + "name": "model", "value": "MobilenetSSD" }, { - "attribute": "context", + "name": "context", "value": "Page 33, Architecture for activity detection and recognition" } ] @@ -6320,11 +6320,11 @@ "content": "Offset values (cx, cy, w, h) from the default box are included in boxes, with shapes (1, 3000, 4) for boxes and (1, 3000, 21) for scores.", "attributes": [ { - "attribute": "model", + "name": "model", "value": "default box offsets" }, { - "attribute": "context", + "name": "context", "value": "Page 33" } ] @@ -6335,11 +6335,11 @@ "content": "The 20 different item categories are represented by confidence levels in Scores, with 0 representing the backdrop.", "attributes": [ { - "attribute": "categories", + "name": "categories", "value": "20" }, { - "attribute": "backdrop", + "name": "backdrop", "value": "0" } ] @@ -6350,7 +6350,7 @@ "content": "For each item during training, SSD just requires a ground truth box and an input picture.", "attributes": [ { - "attribute": "process", + "name": "process", "value": "training" } ] @@ -6361,7 +6361,7 @@ "content": "The small set of default boxes were examined (for example, 4) with various aspect ratios at each position in several feature maps with various sizes (for example, 8 × 8 and 4 × 4 in (b) and (c)).", "attributes": [ { - "attribute": "default_boxes", + "name": "default_boxes", "value": "various aspect ratios across multiple feature maps" } ] @@ -6372,7 +6372,7 @@ "content": "The shape offsets and confidences ((c1, c2, …, cp)) for each default box for all object categories were forecast.", "attributes": [ { - "attribute": "forecast", + "name": "forecast", "value": "shape offsets and confidences for all object categories" } ] @@ -6383,7 +6383,7 @@ "content": "These default boxes were first matched to the ground truth boxes at training time. As an illustration in Figure 3.7, two default boxes with two persons in the frame are matched; these are considered positives, and the remaining default boxes are considered negatives.", "attributes": [ { - "attribute": "matching", + "name": "matching", "value": "positive/negative assignment during training" } ] @@ -6394,7 +6394,7 @@ "content": "The model loss is a weighted average of the localization loss and the confidence loss (e.g. 
Softmax).", "attributes": [ { - "attribute": "loss_components", + "name": "loss_components", "value": "localization and confidence (Softmax)" } ] @@ -6405,7 +6405,7 @@ "content": "Figure 3.7: Single Shot Detector (SSD) Framework", "attributes": [ { - "attribute": "figure", + "name": "figure", "value": "3.7" } ] @@ -6416,7 +6416,7 @@ "content": "3.3.1.4 Reporting Channels To receive updates on activities detected and recognized, Short Message Service (SMS), Email and In-App notifications are used as channels for receiving information.", "attributes": [ { - "attribute": "channels", + "name": "channels", "value": "SMS, Email, In-App" } ] @@ -6427,7 +6427,7 @@ "content": "This uses Twilio Application Program Interface (API) to send SMS from the script that’s being executed on the controller.", "attributes": [ { - "attribute": "integration", + "name": "integration", "value": "Twilio API" } ] @@ -6438,7 +6438,7 @@ "content": "Nodemailer and Push notifications were adapted for email and in-app notification.", "attributes": [ { - "attribute": "integration", + "name": "integration", "value": "Nodemailer; Push" } ] @@ -6449,7 +6449,7 @@ "content": "The flowchart shown below in fig.3.8 shows the multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "figure", + "name": "figure", "value": "3.8" } ] @@ -6460,7 +6460,7 @@ "content": "Figure 3.7: Single Shot Detector (SSD) Framework; The flowchart shown below in fig.3.8 shows the multi-function activity monitoring and reporting system.", "attributes": [ { - "attribute": "notes", + "name": "notes", "value": "Figure references on page" } ] @@ -6471,15 +6471,15 @@ "content": "To be able to update the data in realtime from the mobile interface to the controller and vice-versa, the Firebase Realtime Database was utilized, this is a database that is hosted in the cloud. Data is synced in real-time to the controller and saved as JSON. The Realtime Database instance is shared by the controller and the mobile interface which ensures that the device always have the most recent data available. Activities, camera information and tracking data are stored in the realtime database.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2.1 Firebase Realtime Database" }, { - "attribute": "source", + "name": "source", "value": "Page 34-35" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6490,15 +6490,15 @@ "content": "This project implements Cloud Storage for Firebase, a robust, user-friendly, and cost-efficient object storage solution created for Google scale. Regardless of network condition, the Firebase SDKs for Cloud Storage give Google security to file uploads and downloads for your Firebase apps. 
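A sketch of the upload flow described here (and continued just below, where the publicly accessible link is mentioned), using the firebase_admin SDK; the service-account key, bucket name, and file paths are placeholders:

```python
import firebase_admin
from firebase_admin import credentials, storage

cred = credentials.Certificate("serviceAccountKey.json")  # placeholder key file
firebase_admin.initialize_app(cred, {"storageBucket": "my-project.appspot.com"})

bucket = storage.bucket()
blob = bucket.blob("videos/activity_clip.mp4")   # destination path (placeholder)
blob.upload_from_filename("activity_clip.mp4")   # recorded clip (placeholder)
blob.make_public()
print(blob.public_url)  # link accessible from anywhere in the world
```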
This stores all the videos recorded in the project and provide a publicly accessible link which can be accessed globally from anywhere in the world as in fig.3.9.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2.2 Firebase Cloud Storage" }, { - "attribute": "source", + "name": "source", "value": "Page 34-35" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6509,15 +6509,15 @@ "content": "In this design, a fully fletched mobile interface for iOS and Android is built to enable managing of monitored and reported activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3 Design of a mobile application for managing monitored and reported activities" }, { - "attribute": "source", + "name": "source", "value": "Page 34-35" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -6528,15 +6528,15 @@ "content": "Setting up for mobile development has been made easy and fast with expo. Expo is a React Native-based toolchain that assists in swiftly setting up applications.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "source", + "name": "source", "value": "Page 35" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -6547,15 +6547,15 @@ "content": "It also offers the Expo SDK, which can be used for some local mobile functions, like Barcode Scanner, MapView, ImagePicker, etc.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "source", + "name": "source", "value": "Page 35" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6566,15 +6566,15 @@ "content": "Using a bar chart to visualize data in this project, a chart is a graphical representation of information. React Native Chart Kit is a popular chart library that aids in presenting data in an engaging way. It has multiple chart symbols like bar charts, pie charts, line charts, etc. In this project, a stacked bar chart was used as a way of displaying the visualized data.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.2 React Native Chart Kit" }, { - "attribute": "source", + "name": "source", "value": "Page 35" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6585,15 +6585,15 @@ "content": "Rarely do mobile apps consist of only one screen. A navigator is often responsible for controlling how several displays are shown and switched between. A simple navigation solution is offered by React Navigation, which can display popular stack navigation and tabbed navigation patterns on both Android and iOS. 
fig.3.9 shows a flow diagram of React Navigation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.3 React Navigation" }, { - "attribute": "source", + "name": "source", "value": "Page 35" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -6604,15 +6604,15 @@ "content": "Figure 3.8: Flowchart for multi-function activity monitoring and reporting system", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Figure 3.8 reference" }, { - "attribute": "source", + "name": "source", "value": "Page 34" }, { - "attribute": "confidence", + "name": "confidence", "value": "low" } ] @@ -6623,15 +6623,15 @@ "content": "This stores all the videos recorded in the project and provide a publicly accessible link which can be accessed globally from anywhere in the world as in fig.3.9.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Figure 3.9 reference" }, { - "attribute": "source", + "name": "source", "value": "Page 34-35" }, { - "attribute": "confidence", + "name": "confidence", "value": "low" } ] @@ -6642,15 +6642,15 @@ "content": "Firebase services being known for its ease of use and scalability was adapted for this project to design a centralized database with a storage facility.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2" }, { - "attribute": "topic", + "name": "topic", "value": "centralized database architecture with storage facility" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6661,19 +6661,19 @@ "content": "To be able to update the data in realtime from the mobile interface to the controller and vice-versa, the Firebase Realtime Database was utilized, this is a database that is hosted in the cloud. Data is synced in real-time to the controller and saved as JSON. The Realtime Database instance is shared by the controller and the mobile interface which ensures that the device always have the most recent data available. Activities, camera information and tracking data are stored in the realtime database.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2.1" }, { - "attribute": "data_format", + "name": "data_format", "value": "JSON" }, { - "attribute": "data_type", + "name": "data_type", "value": "Realtime Database" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6684,19 +6684,19 @@ "content": "This project implements Cloud Storage for Firebase, a robust, user-friendly, and cost-efficient object storage solution created for Google scale. 
Regardless of network condition, the Firebase SDKs for Cloud Storage give Google security to file uploads and downloads for your Firebase apps.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2.2" }, { - "attribute": "storage_type", + "name": "storage_type", "value": "Cloud Storage" }, { - "attribute": "security", + "name": "security", "value": "Google security for uploads/downloads" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6707,15 +6707,15 @@ "content": "This stores all the videos recorded in the project and provide a publicly accessible link which can be accessed globally from anywhere in the world as in fig.3.9.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.2.2" }, { - "attribute": "topic", + "name": "topic", "value": "video storage and public links" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6726,15 +6726,15 @@ "content": "In this design, a fully fletched mobile interface for iOS and Android is built to enable managing of monitored and reported activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3" }, { - "attribute": "topic", + "name": "topic", "value": "mobile application design" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6745,15 +6745,15 @@ "content": "Figure 3.8: Flowchart for multi-function activity monitoring and reporting system", "attributes": [ { - "attribute": "figure", + "name": "figure", "value": "3.8" }, { - "attribute": "type", + "name": "type", "value": "caption" }, { - "attribute": "source", + "name": "source", "value": "Page 35" } ] @@ -6764,19 +6764,19 @@ "content": "Expo is a React Native-based toolchain that assists in swiftly setting up applications.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.95" } ] @@ -6787,19 +6787,19 @@ "content": "It provides the user interface and service components that are generally present in third-party Native React Native components as well as a variety of tools for the process of creating and testing Native React apps.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6810,19 +6810,19 @@ "content": "It also offers the Expo SDK, which can be used for some local mobile functions, like Barcode Scanner, MapView, ImagePicker, etc.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6833,19 +6833,19 @@ "content": "In this project, it’s adapted as the framework to kickstart the mobile interface.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.1 Expo" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6856,19 
+6856,19 @@ "content": "React Native Chart Kit is a popular chart library that aids in presenting data in an engaging way. It has multiple chart symbols like bar charts, pie charts, line charts, e.t.c.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.2 React Native Chart Kit" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6879,19 +6879,19 @@ "content": "In this project, a stacked bar chart was used as a way of displaying the visualized data.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.2 React Native Chart Kit" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6902,19 +6902,19 @@ "content": "A simple navigation solution is offered by React Navigation, which can display popular stack navigation and tabbed navigation patterns on both Android and iOS.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.3 React Navigation" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6925,19 +6925,19 @@ "content": "fig.3.9 shows a flow diagram of React Navigation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.3 React Navigation" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -6948,19 +6948,19 @@ "content": "Rarely do mobile apps consist of only one screen.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.3.3.3 React Navigation" }, { - "attribute": "page", + "name": "page", "value": "36" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.8" } ] @@ -6971,7 +6971,7 @@ "content": "3.4 Bill of Engineering Measurement and Evaluation (BEME) The design of multi-function activity reporting and monitoring system was not cost-effective due to the inflation and high cost of materials.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.4 BEME" } ] @@ -6982,7 +6982,7 @@ "content": "Table 3.1: The bill for the multi-function activity reporting and monitoring system S/N Component Description Quantity Cost ( N ) 1 Raspberry Pi Model 3 1 100,000 2 Pi Camera 1080p HD Webcam 5MP OV5647 Sensor 1 10,000 Total 110,000", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Table 3.1; 3.4" } ] @@ -6993,7 +6993,7 @@ "content": "3.5 Major Research Methods and its Findings In a view of considering activities detection and recognition, activities were obtained by the digital cameras and the video feed which was processed to get the activity information using Artificial Intelligence. Bounding boxes on this information were located using Single Shot Detection system (SSD). 
Manual samples from health and security were acquired using mobilenets.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3.5 Major Research Methods and its Findings" } ] @@ -7004,7 +7004,7 @@ "content": "Figure 3.9: Illustration of React Navigation", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Figure caption" } ] @@ -7015,7 +7015,7 @@ "content": "Bounding boxes on this information were located using Single Shot Detection system (SSD).", "attributes": [ { - "attribute": "method", + "name": "method", "value": "SSD" } ] @@ -7026,7 +7026,7 @@ "content": "Manual samples from health and security were acquired using mobilenets.", "attributes": [ { - "attribute": "technology", + "name": "technology", "value": "MobileNet" } ] @@ -7037,11 +7037,11 @@ "content": "This chapter discusses and evaluates the results obtained from the implemented methodology of Chapter three. Given the initial stated parameters, these serves to evaluate the performance of the system and quality of service of the system.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.0 Overview" }, { - "attribute": "chapter", + "name": "chapter", "value": "Chapter 4" } ] @@ -7052,11 +7052,11 @@ "content": "Diverse locations were used within Keffi, Abuja and Minna for the model testing.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.0 Overview" }, { - "attribute": "location_set", + "name": "location_set", "value": "Keffi, Abuja, Minna" } ] @@ -7067,11 +7067,11 @@ "content": "Images and real-time video feeds were used to test the algorithm to detect activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "test_mode", + "name": "test_mode", "value": "real-time video feeds" } ] @@ -7082,11 +7082,11 @@ "content": "The video feeds being a hospital room scene and a home room, it was prepared to test how intelligent the model was from every angle.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "test_scenes", + "name": "test_scenes", "value": "hospital room, home room" } ] @@ -7097,11 +7097,11 @@ "content": "Camera was placed at the top and center of the room and each result gave different accuracy level.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "camera_position", + "name": "camera_position", "value": "top and center" } ] @@ -7112,11 +7112,11 @@ "content": "As for the motion detection, accuracy level was almost the same at multiple camera location but for the other activities, positioning the camera at the center produced a better and more accurate prediction.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "analysis", + "name": "analysis", "value": "motion_detection_vs_other_activities" } ] @@ -7127,11 +7127,11 @@ "content": "This work was implemented using a Raspberry Pi controller and a 1080P Pi Camera and the result was documented as in Table 4.1.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "hardware", + "name": "hardware", "value": "Raspberry Pi + 1080P Pi Camera" } ] @@ -7142,11 +7142,11 @@ "content": "Table 4.1: Performance of the A.I at different positions in a room Location Position Activities Success Rate Hospital Top Motion 90% Home Center Motion 95% Hospital Top Posture 45% Home Center Posture 70%", "attributes": [ { - "attribute": 
"section", + "name": "section", "value": "4.2" }, { - "attribute": "table", + "name": "table", "value": "Table 4.1" } ] @@ -7157,11 +7157,11 @@ "content": "The Raspberry Pi is designed to operate at 5 volts with a tolerance of 5%. (4.75 - 5.25 volts). The Pi won't turn on if you give less voltage than is necessary.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "power_spec", + "name": "power_spec", "value": "5V ±5%" } ] @@ -7172,11 +7172,11 @@ "content": "It receives up to 30 fps feed from the Pi Camera which is processed by the software part of the project.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "fps", + "name": "fps", "value": "up to 30 fps" } ] @@ -7187,11 +7187,11 @@ "content": "to test how intelligent the model was from every angle.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "note", + "name": "note", "value": "model_intelligence_evaluation" } ] @@ -7202,11 +7202,11 @@ "content": "Home Center Motion 95%", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2" }, { - "attribute": "table_entry", + "name": "table_entry", "value": "Home Center Motion 95%" } ] @@ -7217,23 +7217,23 @@ "content": "The motion detection system as shown in fig.4.1 and fig.4.2. This is what the Raspberry Pi Camera sees. It detects and draws a rectangle around the detected object, in this case, a human. The detection works with time set by the user of the mobile interface. Motion is being detected only when the time interval set is within the current time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.1 Motion Detection" }, { - "attribute": "camera", + "name": "camera", "value": "Raspberry Pi Camera" }, { - "attribute": "figures", + "name": "figures", "value": "fig.4.1 and fig.4.2" }, { - "attribute": "time_control", + "name": "time_control", "value": "time interval set by user via mobile interface" }, { - "attribute": "restriction", + "name": "restriction", "value": "detection only when interval is within current time" } ] @@ -7244,19 +7244,19 @@ "content": "4.2.2 Posture Recognition To detect activities such as lying down, sitting and standing, posture recognition was used as described in the use-case and flow chart diagrams in chapter 3. This performs well if the whole body of the person is captured within the camera. It’s also noticed that accuracy skyrocketed if the face of the person is visible to the camera which serves as a good guide to other key points. 
This is shown in fig.4.3.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.2 Posture Recognition" }, { - "attribute": "condition_body", + "name": "condition_body", "value": "whole body captured within the camera" }, { - "attribute": "face_visibility_effect", + "name": "face_visibility_effect", "value": "accuracy skyrocketed if the face is visible" }, { - "attribute": "reference_figure", + "name": "reference_figure", "value": "fig.4.3" } ] @@ -7267,11 +7267,11 @@ "content": "Figure 4.3: Body Posture Detection of a person sitting down", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.2 Posture Recognition" }, { - "attribute": "figure", + "name": "figure", "value": "fig.4.3" } ] @@ -7282,11 +7282,11 @@ "content": "Realtime communication was needed for a swift synchronization between the hardware prototype and the mobile interface.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3 Results and Discussion" }, { - "attribute": "topic", + "name": "topic", "value": "real-time sync between hardware and mobile interface" } ] @@ -7297,15 +7297,15 @@ "content": "Firebase realtime database was tested to achieve this with a database schema that tracks activities created and monitored.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3 Results and Discussion" }, { - "attribute": "database", + "name": "database", "value": "Firebase Realtime Database" }, { - "attribute": "schema", + "name": "schema", "value": "tracks activities created and monitored" } ] @@ -7316,15 +7316,15 @@ "content": "With 1GB storage limit capacity in the Cloud Firebase Storage, videos of recorded tracked activities were securely stored on the cloud.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3 Results and Discussion" }, { - "attribute": "storage", + "name": "storage", "value": "1GB limit in Cloud Firebase Storage" }, { - "attribute": "security", + "name": "security", "value": "videos securely stored on the cloud" } ] @@ -7335,19 +7335,19 @@ "content": "To detect activities such as lying down, sitting and standing, posture recognition was used as described in the use-case and flow chart diagrams in chapter 3.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.2 Posture Recognition" }, { - "attribute": "source", + "name": "source", "value": "Page 40" }, { - "attribute": "related_section", + "name": "related_section", "value": "use-case and flow chart diagrams in chapter 3" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7358,15 +7358,15 @@ "content": "This performs well if the whole body of the person is captured within the camera. It’s also noticed that accuracy skyrocketed if the face of the person is visible to the camera which serves as a good guide to other key points. This is shown in fig.4.3.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.2 Posture Recognition" }, { - "attribute": "figure", + "name": "figure", "value": "Fig. 
4.3" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7377,15 +7377,15 @@ "content": "Figure 4.3: Body Posture Detection of a person sitting down", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.2.2 Posture Recognition" }, { - "attribute": "figure", + "name": "figure", "value": "4.3" }, { - "attribute": "note", + "name": "note", "value": "Caption/visual reference" } ] @@ -7396,11 +7396,11 @@ "content": "4.3 Results and Discussion for centralized database architecture with storage scheme", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -7411,15 +7411,15 @@ "content": "Realtime communication was needed for a swift synchronization between the hardware prototype and the mobile interface.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3" }, { - "attribute": "topic", + "name": "topic", "value": "Real-time synchronization" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7430,15 +7430,15 @@ "content": "Firebase realtime database was tested to achieve this with a database schema that tracks activities created and monitored.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3" }, { - "attribute": "technology", + "name": "technology", "value": "Firebase Realtime Database" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7449,15 +7449,15 @@ "content": "With 1GB storage limit capacity in the Cloud Firebase Storage, videos of recorded tracked activities were securely stored on the cloud.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3" }, { - "attribute": "storage", + "name": "storage", "value": "Cloud Firebase Storage 1 GB limit" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7468,15 +7468,15 @@ "content": "This project utilized a simple structure schema for the realtime database.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Firebase Realtime Database (4.3.1)" }, { - "attribute": "page", + "name": "page", "value": "41" }, { - "attribute": "source", + "name": "source", "value": "Page 41" } ] @@ -7487,19 +7487,19 @@ "content": "An activities collection which has the activities that are created by a certain user as shown in fig.4.5.", "attributes": [ { - "attribute": "collection", + "name": "collection", "value": "activities" }, { - "attribute": "purpose", + "name": "purpose", "value": "store activities created by a user" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.5" }, { - "attribute": "page", + "name": "page", "value": "41" } ] @@ -7510,19 +7510,19 @@ "content": "The camera collection stores all active camera data with the activities the camera is supposed to track as shown in fig.4.6.", "attributes": [ { - "attribute": "collection", + "name": "collection", "value": "camera" }, { - "attribute": "purpose", + "name": "purpose", "value": "store active camera data and associated tracked activities" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.6" }, { - "attribute": "page", + "name": "page", "value": "41" } ] @@ -7533,19 +7533,19 @@ "content": "The tracked collection serves as a notification collection, this stores the activity information that is detected as shown in fig.4.7.", "attributes": [ { - "attribute": "collection", + "name": "collection", "value": 
"tracked" }, { - "attribute": "purpose", + "name": "purpose", "value": "store detected activity information as notifications" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.7" }, { - "attribute": "page", + "name": "page", "value": "41" } ] @@ -7556,15 +7556,15 @@ "content": "Figure 4.5: Activities collection table on Firebase Figure 4.6: Camera collection table on Firebase Figure 4.7: Tracked collection table on Firebase", "attributes": [ { - "attribute": "figures", + "name": "figures", "value": "Fig.4.5, Fig.4.6, Fig.4.7" }, { - "attribute": "page", + "name": "page", "value": "41" }, { - "attribute": "section", + "name": "section", "value": "Firebase Realtime Database (4.3.1)" } ] @@ -7575,19 +7575,19 @@ "content": "This project utilized a simple structure schema for the realtime database.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3.1 Firebase Realtime database" }, { - "attribute": "page", + "name": "page", "value": "42" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7598,23 +7598,23 @@ "content": "An activities collection which has the activities that are created by a certain user as shown in fig.4.5.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3.1 Firebase Realtime database" }, { - "attribute": "page", + "name": "page", "value": "42" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" }, { - "attribute": "figures", + "name": "figures", "value": "fig.4.5" } ] @@ -7625,23 +7625,23 @@ "content": "The camera collection stores all active camera data with the activities the camera is supposed to track as shown in fig.4.6.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3.1 Firebase Realtime database" }, { - "attribute": "page", + "name": "page", "value": "42" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" }, { - "attribute": "figures", + "name": "figures", "value": "fig.4.6" } ] @@ -7652,23 +7652,23 @@ "content": "The tracked collection serves as a notification collection, this stores the activity information that is detected as shown in fig.4.7.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3.1 Firebase Realtime database" }, { - "attribute": "page", + "name": "page", "value": "42" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" }, { - "attribute": "figures", + "name": "figures", "value": "fig.4.7" } ] @@ -7679,19 +7679,19 @@ "content": "Figure 4.5: Activities collection table on Firebase Figure 4.6: Camera collection table on Firebase 42", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.3.1 Firebase Realtime database" }, { - "attribute": "page", + "name": "page", "value": "42" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7702,27 +7702,27 @@ "content": "The introduction screen welcomes user to the application as seen in fig.4.8, this has a good user experience (UX) as the user understands briefly what the application is built to do. 
It also provides call to action (CTA) that allows the user to login, register or retrieve their password.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "43" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.8" }, { - "attribute": "topic", + "name": "topic", "value": "Introduction Screen" }, { - "attribute": "source", + "name": "source", "value": "Mobile application 4.x (section 4.4)" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7733,23 +7733,23 @@ "content": "this has a good user experience (UX) as the user understands briefly what the application is built to do.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "43" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "positive" }, { - "attribute": "source", + "name": "source", "value": "Page 43, Introduction Screen" }, { - "attribute": "confidence", + "name": "confidence", "value": "medium" } ] @@ -7760,23 +7760,23 @@ "content": "It also provides call to action (CTA) that allows the user to login, register or retrieve their password.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "43" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.8" }, { - "attribute": "source", + "name": "source", "value": "Introduction Screen text" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7787,27 +7787,27 @@ "content": "To be able to access the monitoring and reporting aspects of the system, an authentication is needed which is performed by the login screen. A user needs to validate he has access to this application before being granted access. Hence, the interface shown in fig.4.9 handles the authentication process.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.2 Login Screen" }, { - "attribute": "page", + "name": "page", "value": "43" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.9" }, { - "attribute": "topic", + "name": "topic", "value": "Authentication" }, { - "attribute": "source", + "name": "source", "value": "Page 43, 4.4.2" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7818,23 +7818,23 @@ "content": "This allows new users create an account to be able to access the system. This asks series of details to get user information for validation and verification. 
The screen shown below in fig.5.0 handles the registration mechanism.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.3 Registration Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.5.0" }, { - "attribute": "source", + "name": "source", "value": "Registration Screen text" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7845,15 +7845,15 @@ "content": "Figure 4.8: Introduction Screen and Figure 4.9: Login Screen are shown; text for registration mentions fig.5.0, which may indicate a mismatch or typographical error.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4 (Screens)" }, { - "attribute": "page", + "name": "page", "value": "43-44" }, { - "attribute": "notes", + "name": "notes", "value": "Possible figure-numbering inconsistency (fig.5.0 vs fig.4.x)" } ] @@ -7864,19 +7864,19 @@ "content": "The introduction screen welcomes user to the application as seen in fig.4.8, this has a good user experience (UX) as the user understands briefly what the application is built to do. It also provides call to action (CTA) that allows the user to login, register or retrieve their password.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.8" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7887,19 +7887,19 @@ "content": "The introduction screen welcomes user to the application as seen in fig.4.8, this has a good user experience (UX) as the user understands briefly what the application is built to do.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.8" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7910,19 +7910,19 @@ "content": "To be able to access the monitoring and reporting aspects of the system, an authentication is needed which is performed by the login screen.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.2 Login Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.9" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7933,15 +7933,15 @@ "content": "A user needs to validate he has access to this application before being granted access.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.2 Login Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -7952,15 +7952,15 @@ "content": "Figure 4.8: Introduction Screen", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.1 Introduction Screen" }, { - "attribute": "page", + "name": "page", "value": "44" }, { - "attribute": "figure", + "name": "figure", "value": "Fig.4.8" } ] @@ -7971,15 +7971,15 @@ "content": "Figure 4.10: Registration Screen", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.x Screens (Registration)" }, { - "attribute": "page", + "name": "page", "value": "45" }, { - "attribute": "figure", + "name": "figure", "value": 
"4.10" } ] @@ -7990,15 +7990,15 @@ "content": "4.4.4 Forget Password Screen This forget password screen is shown in fig.4.11 serves as an interface where users can reset their lost login credentials in order to regain access to the application.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.4 Forget Password Screen" }, { - "attribute": "page", + "name": "page", "value": "45" }, { - "attribute": "figure", + "name": "figure", "value": "4.11" } ] @@ -8009,15 +8009,15 @@ "content": "Figure 4.11: Forgot Password Screen", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.4 Forget Password Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "figure", + "name": "figure", "value": "4.11" } ] @@ -8028,11 +8028,11 @@ "content": "4.4.5 Home Screen This serves as the main interface for this application. It serves as a connector to all the other screens.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" } ] @@ -8043,15 +8043,15 @@ "content": "Users can see activities and reports with visual statistical data such as the bar chart as seen in fig.4.12.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "visualization", + "name": "visualization", "value": "bar chart" } ] @@ -8062,15 +8062,15 @@ "content": "It also displays recent activities reported with the time and percentage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "aspect", + "name": "aspect", "value": "recent activities with time and percentage" } ] @@ -8081,15 +8081,15 @@ "content": "This page is only shown to authenticated users, i.e., those who have passed the login validation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "security", + "name": "security", "value": "authenticated users only" } ] @@ -8100,23 +8100,23 @@ "content": "4.4.5 Home Screen This serves as the main interface for this application. 
It serves as a connector to all the other screens.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "figure", + "name": "figure", "value": "fig.4.12" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8127,23 +8127,23 @@ "content": "Users can see activities and reports with visual statistical data such as the bar chart as seen in fig.4.12.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "figure", + "name": "figure", "value": "fig.4.12" }, { - "attribute": "topic", + "name": "topic", "value": "data visualization" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8154,15 +8154,15 @@ "content": "It also displays recent activities reported with the time and percentage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8173,15 +8173,15 @@ "content": "This page is only shown to authenticated users, i.e., those who have passed the login validation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5 Home Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8192,23 +8192,23 @@ "content": "4.4.6 Create Activity Screen This serves as an interface for tracking activities as shown in fig.4.13 and fig.4.14.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "figure", + "name": "figure", "value": "fig.4.13, fig.4.14" }, { - "attribute": "source", + "name": "source", "value": "Document" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8219,15 +8219,15 @@ "content": "A user selects the activity to track,", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8238,15 +8238,15 @@ "content": "set a time duration which is optional", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8257,15 +8257,15 @@ "content": "a report channel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8276,15 +8276,15 @@ "content": "The time duration makes it that activities are only tracked within a certain picked time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "46" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8295,23 +8295,23 @@ "content": 
"4.4.5 Home Screen This serves as the main interface for this application. It serves as a connector to all the other screens. Users can see activities and reports with visual statistical data such as the bar chart as seen in fig.4.12. It also displays recent activities reported with the time and percentage. This page is only shown to authenticated users, i.e., those who have passed the login validation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.5" }, { - "attribute": "page", + "name": "page", "value": "46-47" }, { - "attribute": "figure", + "name": "figure", "value": "fig.4.12" }, { - "attribute": "topic", + "name": "topic", "value": "Home Screen" }, { - "attribute": "authentication", + "name": "authentication", "value": "required" } ] @@ -8322,19 +8322,19 @@ "content": "4.4.6 Create Activity Screen This serves as an interface for tracking activities as shown in fig.4.13 and fig.4.14. A user selects the activity to track, set a time duration which is optional and a report channel. The time duration makes it that activities are only tracked within a certain picked time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6" }, { - "attribute": "page", + "name": "page", "value": "46-47" }, { - "attribute": "figures", + "name": "figures", "value": "fig.4.13 and fig.4.14" }, { - "attribute": "input_options", + "name": "input_options", "value": "activity to track; time duration (optional); report channel" } ] @@ -8345,19 +8345,19 @@ "content": "A user selects the activity to track", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "47" }, { - "attribute": "figure_referenced", + "name": "figure_referenced", "value": "fig.4.13, fig.4.14" }, { - "attribute": "confidence", + "name": "confidence", "value": "0.9" } ] @@ -8368,15 +8368,15 @@ "content": "set a time duration which is optional", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "47" }, { - "attribute": "attribute", + "name": "attribute", "value": "time duration" } ] @@ -8387,11 +8387,11 @@ "content": "and a report channel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "47" } ] @@ -8402,11 +8402,11 @@ "content": "The time duration makes it that activities are only tracked within a certain picked time.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "47" } ] @@ -8417,15 +8417,15 @@ "content": "Create Activity Inactive state; Create Activity Active state", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "48-49" }, { - "attribute": "figure_referenced", + "name": "figure_referenced", "value": "fig.4.13, fig.4.14" } ] @@ -8436,15 +8436,15 @@ "content": "4.4.7 Activities Screen ... 
lists all the activities that are being tracked by a user.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.7 Activities Screen" }, { - "attribute": "page", + "name": "page", "value": "50" }, { - "attribute": "figure_reference", + "name": "figure_reference", "value": "Figure 4.15" } ] @@ -8455,11 +8455,11 @@ "content": "Create Activity Inactive state/Active state", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.6 Create Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "48-49" } ] @@ -8470,11 +8470,11 @@ "content": "4.4.7 Activities Screen This screen as shown in figure 4.15 lists all the activities that are being tracked by a user.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.7 Activities Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8485,11 +8485,11 @@ "content": "4.4.8 Activity Screen This displays information about a specific activity that is being tracked. It shows the name of the activity, how many times it has been reported and 30 seconds video playback of the captured activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50-51" } ] @@ -8500,11 +8500,11 @@ "content": "4.5 Summary of Result and Discussion Precise findings from the hardware prototype that were provided to the application for monitoring and tracking were acquired with a view to considering actions. Using Single Shot Detection (SSD) to locate the bounding boxes, the OpenCV library was used to draw them.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.5 Summary of Result and Discussion" }, { - "attribute": "page", + "name": "page", "value": "52" } ] @@ -8515,11 +8515,11 @@ "content": "It was also possible to detect, identify, and analyze a person’s position in order to accurately forecast postural activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.5 Summary of Result and Discussion" }, { - "attribute": "page", + "name": "page", "value": "52" } ] @@ -8530,11 +8530,11 @@ "content": "5.0 CONCLUSION The design and development of the A.I enabled multi-function-based activity monitoring and reporting system was a success.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8545,11 +8545,11 @@ "content": "The system was developed using OpenCV Deep Neural Network (DNN) module and Firebase tooling.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8560,11 +8560,11 @@ "content": "The video feed obtained from the Pi Camera triggers the process of detecting and recognition of activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8575,11 +8575,11 @@ "content": "The controller constantly communicates with the realtime database to check for up-to date activities to be tracked.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8590,11 +8590,11 @@ "content": "It performs motion detection using MobileNetSSD and Posture recognition using Posenet.", "attributes": [ { -
"attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8605,11 +8605,11 @@ "content": "Recognized activities are then sent over to firebase realtime database for tracking and cloud storage for storage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8620,11 +8620,11 @@ "content": "The system as design was able to monitor and report multiple activities without human intervention, therefore reduces the stress of deploying humans at hospitals or security gates at homes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "page", + "name": "page", "value": "53" } ] @@ -8635,11 +8635,11 @@ "content": "5.2 RECOMMENDATION The designed system met its stated objectives and uncovered the following are some areas for improvement in future work: i. Build support for bad lightening or low camera quality ii. The inclusion of night vision camera to allow the activity recognition model to detect and recognize activities even at night. iii. Live video feeds from the camera should be accessible via the mobile application iv. Camera switching should be made possible from the mobile application.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2 Recommendations" }, { - "attribute": "page", + "name": "page", "value": "53-54" } ] @@ -8650,11 +8650,11 @@ "content": "4.4.8 Activity Screen This displays information about a specific activity that is being tracked. It shows the name of the activity, how many times it has been reported and 30 seconds video playback of the captured activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8665,11 +8665,11 @@ "content": "It shows the name of the activity,", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8680,11 +8680,11 @@ "content": "how many times it has been reported", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8695,11 +8695,11 @@ "content": "30 seconds video playback of the captured activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8710,11 +8710,11 @@ "content": "The interface is shown below in fig.4.16.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8725,11 +8725,11 @@ "content": "Figure 4.15: Activities Screen 4.4.8 Activity Screen This displays information about a specific activity that is being tracked. It shows the name of the activity, how many times it has been reported and 30 seconds video playback of the captured activity. 
The interface is shown below in fig.4.16.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8740,11 +8740,11 @@ "content": "It shows the name of the activity, how many times it has been reported", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.4.8 Activity Screen" }, { - "attribute": "page", + "name": "page", "value": "50" } ] @@ -8755,19 +8755,19 @@ "content": "Using Single Shot Detection (SSD) to locate the bounding boxes, the OpenCV library was used to draw them.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.5 Summary of Result and Discussion" }, { - "attribute": "source_page", + "name": "source_page", "value": "51" }, { - "attribute": "technologies", + "name": "technologies", "value": "SSD, OpenCV" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8778,19 +8778,19 @@ "content": "It was also possible to detect, identify, and analyze a person’s position in order to accurately forecast postural activity.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4.5 Summary of Result and Discussion" }, { - "attribute": "source_page", + "name": "source_page", "value": "51" }, { - "attribute": "topic", + "name": "topic", "value": "postural activity forecasting" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8801,19 +8801,19 @@ "content": "The design and development of the A.I enabled multi-function-based activity monitoring and reporting system was a success.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "tone", + "name": "tone", "value": "positive" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8824,19 +8824,19 @@ "content": "The system was developed using OpenCV Deep Neural Network (DNN) module and Firebase tooling.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "technologies", + "name": "technologies", "value": "OpenCV DNN, Firebase" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8847,19 +8847,19 @@ "content": "The video feed obtained from the Pi Camera triggers the process of detecting and recognition of activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "camera", + "name": "camera", "value": "Pi Camera" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8870,19 +8870,19 @@ "content": "The controller constantly communicates with the realtime database to check for up-to-date activities to be tracked.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "component", + "name": "component", "value": "controller" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8893,19 +8893,19 @@ "content": "It performs motion detection using MobileNetSSD and Posture recognition using Posenet.", "attributes": [ { - "attribute": "section", + "name": "section",
"value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "models", + "name": "models", "value": "MobileNetSSD, PoseNet" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8916,19 +8916,19 @@ "content": "Recognized activities are then sent over to firebase realtime database for tracking and cloud storage for storage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "storage", + "name": "storage", "value": "Firebase realtime database, cloud storage" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8939,19 +8939,19 @@ "content": "The system as design was able to monitor and report multiple activities without human intervention, therefore reduces the stress of deploying humans at hospitals or security gates at homes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.0 Conclusion" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "impact", + "name": "impact", "value": "reduces human deployment at hospitals/security gates" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8962,19 +8962,19 @@ "content": "i. Build support for bad lightening or low camera quality", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2 Recommendations" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "topic", + "name": "topic", "value": "lighting conditions" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -8985,19 +8985,19 @@ "content": "ii. The inclusion of night vision camera to allow the activity recognition model to detect and recognize activities even at night.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2 Recommendations" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "topic", + "name": "topic", "value": "night vision" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9008,19 +9008,19 @@ "content": "iii. Live video feeds from the camera should be accessible via the mobile application", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2 Recommendations" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "topic", + "name": "topic", "value": "mobile app access to feeds" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9031,19 +9031,19 @@ "content": "iv. 
Camera switching should be made possible from the mobile application.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2 Recommendations" }, { - "attribute": "source_page", + "name": "source_page", "value": "52" }, { - "attribute": "topic", + "name": "topic", "value": "camera switching" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9054,11 +9054,11 @@ "content": "The system was developed using OpenCV Deep Neural Network (DNN) module and Firebase tooling.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9069,11 +9069,11 @@ "content": "The system was designed and developed in accordance with the objectives in Section 1.3, the outlined methodologies in Section 3.2, the overall design and flow explained in Section 3.3.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9084,11 +9084,11 @@ "content": "The video feed obtained from the Pi Camera triggers the process of detecting and recognition of activities.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9099,11 +9099,11 @@ "content": "The controller constantly communicates with the realtime database to check for up-to-date activities to be tracked.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9114,11 +9114,11 @@ "content": "It performs motion detection using MobileNetSSD and Posture recognition using Posenet.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9129,11 +9129,11 @@ "content": "Recognized activities are then sent over to firebase realtime database for tracking and cloud storage for storage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9144,11 +9144,11 @@ "content": "The system as design was able to monitor and report multiple activities without human intervention, therefore reduces the stress of deploying humans at hospitals or security gates at homes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.1" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9159,11 +9159,11 @@ "content": "i. Build support for bad lightening or low camera quality", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9174,11 +9174,11 @@ "content": "ii. The inclusion of night vision camera to allow the activity recognition model to detect and recognize activities even at night.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9189,11 +9189,11 @@ "content": "iii. Live video feeds from the camera should be accessible via the mobile application", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9204,11 +9204,11 @@ "content": "iv. 
Camera switching should be made possible from the mobile application.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "5.2" }, { - "attribute": "source", + "name": "source", "value": "Chapter 5" } ] @@ -9219,39 +9219,39 @@ "content": "Chaaraoui, A. A., Padilla-López, J. R., Ferrández-Pastor, F. J., Nieto-Hidalgo, M., & Flórez-Revuelta, F. (2014). A vision-based system for intelligent monitoring: Human behaviour analysis and privacy by context. A Vision-Based System for Intelligent Monitoring: Human Behaviour Analysis and Privacy by Context Alexandros, 14(5), 8895–8925. https://doi.org/10.3390/s140508895", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Sensors (2014) 14:8895-8925" }, { - "attribute": "authors", + "name": "authors", "value": "Chaaraoui A.A.; Padilla-López J.R.; Ferrández-Pastor F.J.; Nieto-Hidalgo M.; Flórez-Revuelta F." }, { - "attribute": "year", + "name": "year", "value": "2014" }, { - "attribute": "title", + "name": "title", "value": "A vision-based system for intelligent monitoring: Human behaviour analysis and privacy by context" }, { - "attribute": "venue", + "name": "venue", "value": "Sensors" }, { - "attribute": "pages", + "name": "pages", "value": "8895-8925" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.3390/s140508895" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9262,39 +9262,39 @@ "content": "Deng, J., Dong, W., Socher, R., Li, L. J., Li, K., & Fei-Fei, L. (2009). Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition (pp. 248-255).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "IEEE CVPR 2009" }, { - "attribute": "authors", + "name": "authors", "value": "Deng J.; Dong W.; Socher R.; Li L.J.; Li K.; Fei-Fei L." }, { - "attribute": "year", + "name": "year", "value": "2009" }, { - "attribute": "title", + "name": "title", "value": "Imagenet: A large-scale hierarchical image database" }, { - "attribute": "venue", + "name": "venue", "value": "IEEE CVPR 2009" }, { - "attribute": "pages", + "name": "pages", "value": "248-255" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9305,39 +9305,39 @@ "content": "Mishra, C., & Gupta, D. L. (2017). Deep Machine Learning and Neural Networks: An Overview. IAES International Journal of Artificial Intelligence (IJ-AI), 6(2), 66-73. https://doi.org/10.11591/ijai.v6.i2.pp66-73", "attributes": [ { - "attribute": "source", + "name": "source", "value": "IJ-AI (2017) 6(2) 66-73" }, { - "attribute": "authors", + "name": "authors", "value": "Mishra C.; Gupta D.L." }, { - "attribute": "year", + "name": "year", "value": "2017" }, { - "attribute": "title", + "name": "title", "value": "Deep Machine Learning and Neural Networks: An Overview" }, { - "attribute": "venue", + "name": "venue", "value": "IAES IJ-AI" }, { - "attribute": "pages", + "name": "pages", "value": "66-73" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.11591/ijai.v6.i2.pp66-73" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9348,39 +9348,39 @@ "content": "Tee, K. S., Zulkifli, A. H. B., & Soon, C. F. 
(2015). An activity monitoring system for elderly. ARPN Journal of Engineering and Applied Sciences, 10(18), 8467–8472.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "ARPN Journal of Engineering and Applied Sciences (2015) 10(18) 8467-8472" }, { - "attribute": "authors", + "name": "authors", "value": "Tee K.S.; Zulkifli A.H.B.; Soon C.F." }, { - "attribute": "year", + "name": "year", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "An activity monitoring system for elderly" }, { - "attribute": "venue", + "name": "venue", "value": "ARPN Journal of Engineering and Applied Sciences" }, { - "attribute": "pages", + "name": "pages", "value": "8467-8472" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9391,39 +9391,39 @@ "content": "Wang, P. (2019). On Defining Artificial Intelligence. Journal of Artificial General Intelligence , 10 (2), 1–37. https://doi.org/10.2478/jagi-2019-0002", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Journal of Artificial General Intelligence (2019) 10(2) 1-37" }, { - "attribute": "author", + "name": "author", "value": "Wang P." }, { - "attribute": "year", + "name": "year", "value": "2019" }, { - "attribute": "title", + "name": "title", "value": "On Defining Artificial Intelligence" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Artificial General Intelligence" }, { - "attribute": "pages", + "name": "pages", "value": "1-37" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.2478/jagi-2019-0002" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9434,39 +9434,39 @@ "content": "Attaran, M., & Deb, P. (2018). Machine Learning : The New ’ Big Thing ’ for Competitive Advantage International Journal of Knowledge Engineering and Data Mining, 5(4), 277-305. https://doi.org/10.1504/IJKEDM.2018.10015621.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "IJKEDM (2018) 5(4) 277-305" }, { - "attribute": "authors", + "name": "authors", "value": "Attaran M.; Deb P." }, { - "attribute": "year", + "name": "year", "value": "2018" }, { - "attribute": "title", + "name": "title", "value": "Machine Learning: The New 'Big Thing' for Competitive Advantage" }, { - "attribute": "venue", + "name": "venue", "value": "International Journal of Knowledge Engineering and Data Mining (IJKEDM)" }, { - "attribute": "pages", + "name": "pages", "value": "277-305" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.1504/IJKEDM.2018.10015621" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9477,39 +9477,39 @@ "content": "Girshick, R. (2015). Fast r-cnn. In Proceedings of the IEEE international conference on computer vision (pp. 1440-1448).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "ICCV 2015" }, { - "attribute": "author", + "name": "author", "value": "Girshick R."
}, { - "attribute": "year", + "name": "year", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "Fast R-CNN" }, { - "attribute": "venue", + "name": "venue", "value": "IEEE ICCV 2015" }, { - "attribute": "pages", + "name": "pages", "value": "1440-1448" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9520,39 +9520,39 @@ "content": "Lin, T. Y., Dollár, P., Girshick, R., He, K., Hariharan, B., & Belongie, S. (2017). Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2117-2125).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "CVPR 2017" }, { - "attribute": "authors", + "name": "authors", "value": "Lin T.Y.; Dollár P.; Girshick R.; He K.; Hariharan B.; Belongie S." }, { - "attribute": "year", + "name": "year", "value": "2017" }, { - "attribute": "title", + "name": "title", "value": "Feature pyramid networks for object detection" }, { - "attribute": "venue", + "name": "venue", "value": "CVPR 2017" }, { - "attribute": "pages", + "name": "pages", "value": "2117-2125" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9563,39 +9563,39 @@ "content": "Lu, S., Christie, G. A., Nguyen, T. T., Freeman, J. D, and Hsu, E. B., (2021). Applications of Artificial Intelligence and Machine Learning in Disasters and Public Health Emergencies. Disaster Medicine and Public Health Preparedness, 1-8.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Disaster Medicine and Public Health Preparedness (2021) 1-8" }, { - "attribute": "authors", + "name": "authors", "value": "Lu S.; Christie G.A.; Nguyen T.T.; Freeman J.D.; Hsu E.B." }, { - "attribute": "year", + "name": "year", "value": "2021" }, { - "attribute": "title", + "name": "title", "value": "Applications of Artificial Intelligence and Machine Learning in Disasters and Public Health Emergencies" }, { - "attribute": "venue", + "name": "venue", "value": "Disaster Medicine and Public Health Preparedness" }, { - "attribute": "pages", + "name": "pages", "value": "1-8" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9606,39 +9606,39 @@ "content": "Malaainine, M. E, I., Lechgar, H., & Rhinane, H. (2021). Y OLOv2 Deep Learning Model and GIS Based Algorithms for Vehicle Tracking. Journal of Geographic Information System, 2021, 13, 395-409. https://doi.org/10.4236/jgis.2021.134022.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Journal of Geographic Information System (2021) 13:395-409" }, { - "attribute": "authors", + "name": "authors", "value": "Malaainine M.E.I.; Lechgar H.; Rhinane H."
}, { - "attribute": "year", + "name": "year", "value": "2021" }, { - "attribute": "title", + "name": "title", "value": "YOLOv2 Deep Learning Model and GIS Based Algorithms for Vehicle Tracking" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Geographic Information System" }, { - "attribute": "pages", + "name": "pages", "value": "395-409" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.4236/jgis.2021.134022" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9649,39 +9649,39 @@ "content": "Kruspe, A., Kersten, J., & Klan, F. (2021). Review article: Detection of actionable tweets in crisis events. Natural Hazards Earth System Science., 21, 1825–1845. https://doi.org/10.5194/nhess-21-1825-2021.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "NHESS (2021) 21:1825-1845" }, { - "attribute": "authors", + "name": "authors", "value": "Kruspe A.; Kersten J.; Klan F." }, { - "attribute": "year", + "name": "year", "value": "2021" }, { - "attribute": "title", + "name": "title", "value": "Detection of actionable tweets in crisis events" }, { - "attribute": "venue", + "name": "venue", "value": "Natural Hazards Earth System Science" }, { - "attribute": "pages", + "name": "pages", "value": "1825-1845" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.5194/nhess-21-1825-2021" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -9692,39 +9692,39 @@ "content": "Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., & Rabinovich, A. (2015). Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1-9).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "CVPR 2015" }, { - "attribute": "authors", + "name": "authors", "value": "Szegedy C.; Liu W.; Jia Y.; Sermanet P.; Reed S.; Anguelov D.; Rabinovich A." }, { - "attribute": "year", + "name": "year", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "Going deeper with convolutions" }, { - "attribute": "venue", + "name": "venue", "value": "CVPR 2015" }, { - "attribute": "pages", + "name": "pages", "value": "1-9" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" }, { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -10767,11 +10767,11 @@ "content": "Chaaraoui, A. A., Padilla-López, J. R., Ferrández-Pastor, F. J., Nieto-Hidalgo, M., & Flórez-Revuelta, F. (2014). A vision-based system for intelligent monitoring: Human behaviour analysis and privacy by context. A Vision-Based System for Intelligent Monitoring: Human Behaviour Analysis and Privacy by Context. Alexandro, 14(5), 8895–8925. https://doi.org/10.3390/s140508895", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "note", + "name": "note", "value": "Formatting irregularities and duplication observed on page." } ] @@ -10782,11 +10782,11 @@ "content": "The references include foundational works in deep learning and object detection, such as Fast R-CNN (Girshick 2015), Feature Pyramid Networks (Lin et al. 2017), Faster R-CNN (Ren et al. 2015), and Going Deeper with Convolutions (Szegedy et al. 2015).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "note", + "name": "note", "value": "Synthesis from multiple cited works: Fast R-CNN, FPN, Faster R-CNN, etc." } ] @@ -10797,27 +10797,27 @@ "content": "Chaaraoui, A. A., Padilla-López, J. R., Ferrández-Pastor, F. J., Nieto-Hidalgo, M., & Flórez-Revuelta, F. (2014). A vision-based system for intelligent monitoring: Human behaviour analysis and privacy by context. A Vision-Based System for Intelligent Monitoring: Human Behaviour Analysis and Privacy by Context Alexandros,14(5), 8895–8925. https://doi.org/10.3390/s140508895", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2014" }, { - "attribute": "authors", + "name": "authors", "value": "Chaaraoui A A; Padilla-López J R; Ferrández-Pastor F J; Nieto-Hidalgo M; Flórez-Revuelta F" }, { - "attribute": "title", + "name": "title", "value": "A vision-based system for intelligent monitoring: Human behaviour analysis and privacy by context" }, { - "attribute": "venue", + "name": "venue", "value": "Sensors 2014; 14(5); 8895-8925" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.3390/s140508895" } ] @@ -10828,27 +10828,27 @@ "content": "Mishra, C., & Gupta, D. L. (2017). Deep Machine Learning and Neural Networks: An Overview. IAES International Journal of Artificial Intelligence (IJ-AI), 6(2), 66.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2017" }, { - "attribute": "authors", + "name": "authors", "value": "Mishra C; Gupta DL" }, { - "attribute": "title", + "name": "title", "value": "Deep Machine Learning and Neural Networks: An Overview" }, { - "attribute": "venue", + "name": "venue", "value": "IAES IJ-AI 6(2) 2017" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" } ] @@ -10859,27 +10859,27 @@ "content": "Tee, K. S., Zulkifli, A. H. B., & Soon, C. F. (2015). An activity monitoring system for elderly.
ARPN Journal of Engineering and Applied Sciences, 10 (18), 8467–8472.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2015" }, { - "attribute": "authors", + "name": "authors", "value": "Tee K S; Zulkifli A H B; Soon C F" }, { - "attribute": "title", + "name": "title", "value": "An activity monitoring system for elderly" }, { - "attribute": "venue", + "name": "venue", "value": "ARPN J Eng Appl Sci 10(18) 2015" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" } ] @@ -10890,27 +10890,27 @@ "content": "Wang, P. (2019). On Defining Artificial Intelligence. Journal of Artificial General Intelligence , 10 (2), 1–37. https://doi.org/10.2478/jagi-2019-0002", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2019" }, { - "attribute": "authors", + "name": "authors", "value": "Wang P." }, { - "attribute": "title", + "name": "title", "value": "On Defining Artificial Intelligence" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Artificial General Intelligence 10(2) 2019" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.2478/jagi-2019-0002" } ] @@ -10921,27 +10921,27 @@ "content": "Attaran, M., & Deb, P. (2018). Machine Learning : The New ’ Big Thing ’ for Competitive Advantage. International Journal of Knowledge Engineering and Data Mining, 5(4), 277-305. https://doi.org/10.1504/IJKEDM.2018.10015621.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2018" }, { - "attribute": "authors", + "name": "authors", "value": "Attaran M; Deb P" }, { - "attribute": "title", + "name": "title", "value": "Machine Learning: The New ’ Big Thing ’ for Competitive Advantage" }, { - "attribute": "venue", + "name": "venue", "value": "Int J Knowl Eng Data Mining 5(4) 2018" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.1504/IJKEDM.2018.10015621" } ] @@ -10952,23 +10952,23 @@ "content": "Deng, J., Dong, W., Socher, R., Li, L. J., Li, K., & Fei-Fei, L. (2009). Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition (pp. 248-255).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2009" }, { - "attribute": "authors", + "name": "authors", "value": "Deng J; Dong W; Socher R; Li LJ; Li K; Fei-Fei L" }, { - "attribute": "title", + "name": "title", "value": "Imagenet: A large-scale hierarchical image database" }, { - "attribute": "venue", + "name": "venue", "value": "CVPR 2009" } ] @@ -10979,27 +10979,27 @@ "content": "Forbes, G., Massie, S., & Craw, S. (2020) WiFi-based Human Activity Recognition using Raspberry Pi. International Conference on Tools with Artificial Intelligence (ICTAI), 5(3), 722–730. 
https://doi.org/10.1016/j.ijtst.2016.12.001", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2020" }, { - "attribute": "authors", + "name": "authors", "value": "Forbes G; Massie S; Craw S" }, { - "attribute": "title", + "name": "title", "value": "WiFi-based Human Activity Recognition using Raspberry Pi" }, { - "attribute": "venue", + "name": "venue", "value": "ICTAI 2020 5(3) 722-730" }, { - "attribute": "doi", + "name": "doi", "value": "https://doi.org/10.1016/j.ijtst.2016.12.001" } ] @@ -11010,23 +11010,23 @@ "content": "Girshick, R. (2015). Fast r-cnn. In Proceedings of the IEEE international conference on computer vision (pp. 1440-1448).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2015" }, { - "attribute": "authors", + "name": "authors", "value": "Girshick R." }, { - "attribute": "title", + "name": "title", "value": "Fast R-CNN" }, { - "attribute": "venue", + "name": "venue", "value": "IEEE CVPR/ICCV 2015" } ] @@ -11037,27 +11037,27 @@ "content": "Gupta, D. L., & Mishra, C. (2017). Deep Machine Learning and Neural Networks: An Overview. IAES International Journal of Artificial Intelligence (IJ-AI), 6(2), 66-72.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "year", + "name": "year", "value": "2017" }, { - "attribute": "authors", + "name": "authors", "value": "Gupta D L; Mishra C" }, { - "attribute": "title", + "name": "title", "value": "Deep Machine Learning and Neural Networks: An Overview" }, { - "attribute": "venue", + "name": "venue", "value": "IAES IJ-AI 6(2) 2017" }, { - "attribute": "doi", + "name": "doi", "value": "N/A" } ] @@ -11215,15 +11215,15 @@ "content": "Traffic Monitoring System. sustainability, 12, 9177, 1-21 .", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "page", + "name": "page", "value": "55" }, { - "attribute": "topic", + "name": "topic", "value": "Deep Learning for Traffic Monitoring" } ] @@ -11234,15 +11234,15 @@ "content": "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards real-time object detection with region proposal networks. Advancements in neural information processing systems, 28.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Real-time object detection" }, { - "attribute": "year", + "name": "year", "value": "2015" } ] @@ -11253,11 +11253,11 @@ "content": "Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., & Rabinovich, A. (2015). Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1-9).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "CNN architectures" } ] @@ -11268,11 +11268,11 @@ "content": "Kim, K. H., Hong, S., Roh, B., Cheon, Y., & Park, M. (2016). Pvanet: Deep but lightweight neural networks for real-time object detection.
arXiv preprint arXiv:1608.08021.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Lightweight real-time detectors" } ] @@ -11283,11 +11283,11 @@ "content": "Malaainine, M. E, I., Lechgar, H., & Rhinane, H. (2021). Y OLOv2 Deep Learning Model and GIS Based Algorithms for Vehicle Tracking. Journal of Geographic Information System, 2021, 13, 395-409 https://doi.org/10.4236/jgis.2021.134022.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "YOLOv2 for vehicle tracking" } ] @@ -11298,11 +11298,11 @@ "content": "Najafabadi, M. M., Villanustre, F., Khoshgoftaar, T. M., Seliya, N., Wald, R., & Muharemagic, E. (2015). Deep learning applications and challenges in big data analytics. Journal of Big Data, 1–21. https://doi.org/10.1186/s40537-014-0007-7; Sarker, I. H. (2021). Machine Learning : Algorithms , Real ‑ World Applications and Research Directions. SN Computer Science, 2 (3), 1–21. https://doi.org/10.1007/s42979-021-00592-x", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Deep learning in big data and general ML" } ] @@ -11313,11 +11313,11 @@ "content": "Shehata, M., Abo-alez, R., Zaghlool, F., & Abou-kreisha, M. T. (2020). Deep Learning Based Vehicle Tracking in Traffic Management. International Journal of Computer Trends and Technology (IJCTT), 67(3), 5-8. https://doi.org/10.14445/22312803/IJCTT-V67I3P102", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Vehicle tracking in traffic management" } ] @@ -11328,11 +11328,11 @@ "content": "Surya, E., & Ningsih, Y.K. (2019). Smart Monitoring System Using Raspberry-Pi and Smartphone. Department of Electrical Engineering Faculty of Industrial Technology , Trisakti University Jakarta Indonesia, 7(1), 72-84.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Smart monitoring with Raspberry Pi" } ] @@ -11343,11 +11343,11 @@ "content": "Zhang, Y., Song, X.,Wang, M., Guan, T., Liu, J., Wang, Z., Zhen, Y., Zhang, D., & Gu, Xiaoyi. (2020). Research on visual vehicle detection and tracking based on deep learning. IOP Conference Series: Materials Science and Engineering, 892(2020), 1-7 https://doi.org/10.1088/1742-6596/1621/1/012048.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Visual vehicle detection and tracking" } ] @@ -11358,11 +11358,11 @@ "content": "Zhou, Y., Zhou, J., & Liao, F. (2020). Research on Vehicle Tracking Algorithm Based on Deep Learning. Journal of Physics: Conference Series, 1621 (2020), 1-8. https://doi.org/10.1088/1742-6596/1621/1/012048.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Vehicle tracking algorithm" } ] @@ -11373,19 +11373,19 @@ "content": "Traffic Monitoring System. 
sustainability, 12, 9177, 1-21 .", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "topic", + "name": "topic", "value": "Traffic Monitoring System" }, { - "attribute": "page", + "name": "page", "value": "56" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -11396,19 +11396,19 @@ "content": "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Ren et al." }, { - "attribute": "date", + "name": "date", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "Faster R-CNN: Towards real-time object detection with region proposal networks" }, { - "attribute": "venue", + "name": "venue", "value": "Advances in Neural Information Processing Systems (NIPS/NeurIPS)" } ] @@ -11419,23 +11419,23 @@ "content": "Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., & Rabinovich, A. (2015). Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1-9).", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Szegedy et al." }, { - "attribute": "date", + "name": "date", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "Going deeper with convolutions" }, { - "attribute": "venue", + "name": "venue", "value": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR)" }, { - "attribute": "pages", + "name": "pages", "value": "pp. 1-9" } ] @@ -11446,19 +11446,19 @@ "content": "Najafabadi, M. M., Villanustre, F., Khoshgoftaar, T. M., Seliya, N., Wald, R., & Muharemagic, E. (2015). Deep learning applications and challenges in big data analytics. Journal of Big Data, 1–21.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Najafabadi et al." }, { - "attribute": "date", + "name": "date", "value": "2015" }, { - "attribute": "title", + "name": "title", "value": "Deep learning applications and challenges in big data analytics" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Big Data" } ] @@ -11469,15 +11469,15 @@ "content": "Surya, E., & , Ningsih, Y.K. (2019). Smart Monitoring System Using Raspberry-Pi and Smartphone.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Surya & Ningsih (2019)" }, { - "attribute": "date", + "name": "date", "value": "2019" }, { - "attribute": "title", + "name": "title", "value": "Smart Monitoring System Using Raspberry-Pi and Smartphone" } ] @@ -11488,19 +11488,19 @@ "content": "Shehata, M., Abo-alez, R., Zaghlool, F., & Abou-kreisha, M. T. (2020). Deep Learning Based Vehicle Tracking in Traffic Management. International Journal of Computer Trends and Technology (IJCTT), 67(3), 5-8.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Shehata et al." }, { - "attribute": "date", + "name": "date", "value": "2020" }, { - "attribute": "title", + "name": "title", "value": "Deep Learning Based Vehicle Tracking in Traffic Management" }, { - "attribute": "venue", + "name": "venue", "value": "IJCTT" } ] @@ -11511,19 +11511,19 @@ "content": "Usmankhujaev, S., Baydadaev, S., & Woo, K. J. (2020). Real-Time, Deep Learning Based Wrong Direction Detection. 
Applied Sciences, 10(2020), 1-13.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Usmankhujaev et al." }, { - "attribute": "date", + "name": "date", "value": "2020" }, { - "attribute": "title", + "name": "title", "value": "Real-Time, Deep Learning Based Wrong Direction Detection" }, { - "attribute": "venue", + "name": "venue", "value": "Applied Sciences" } ] @@ -11534,19 +11534,19 @@ "content": "Wang, P. (2019). On Defining Artificial Intelligence. Journal of Artificial General Intelligence 10(2) 1-37, 2019.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Wang" }, { - "attribute": "date", + "name": "date", "value": "2019" }, { - "attribute": "title", + "name": "title", "value": "On Defining Artificial Intelligence" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Artificial General Intelligence" } ] @@ -11557,19 +11557,19 @@ "content": "Zhang, Y., Song, X.,Wang, M., Guan, T., Liu, J., Wang, Z., Zhen, Y., Zhang, D., & Gu, Xiaoyi. (2020). Research on visual vehicle detection and tracking based on deep learning. IOP Conference Series: Materials Science and Engineering, 892(2020), 1-7.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Zhang et al." }, { - "attribute": "date", + "name": "date", "value": "2020" }, { - "attribute": "title", + "name": "title", "value": "Research on visual vehicle detection and tracking based on deep learning" }, { - "attribute": "venue", + "name": "venue", "value": "IOP Conference Series: Materials Science and Engineering" } ] @@ -11580,19 +11580,19 @@ "content": "Zhou, Y., Zhou, J., & Liao, F. (2020). Research on Vehicle Tracking Algorithm Based on Deep Learning. Journal of Physics: Conference Series, 1621 (2020), 1-8.", "attributes": [ { - "attribute": "author", + "name": "author", "value": "Zhou et al." }, { - "attribute": "date", + "name": "date", "value": "2020" }, { - "attribute": "title", + "name": "title", "value": "Research on Vehicle Tracking Algorithm Based on Deep Learning" }, { - "attribute": "venue", + "name": "venue", "value": "Journal of Physics: Conference Series" } ] @@ -11603,11 +11603,11 @@ "content": "https://doi.org/10.1186/s40537-014-0007-7", "attributes": [ { - "attribute": "type", + "name": "type", "value": "DOI" }, { - "attribute": "value", + "name": "value", "value": "https://doi.org/10.1186/s40537-014-0007-7" } ] @@ -11618,11 +11618,11 @@ "content": "Surya, E., & , Ningsih, Y.K. (2019). Smart Monitoring System Using Raspberry-Pi and Smartphone.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "note", + "name": "note", "value": "Formatting irregularities" } ] @@ -11633,11 +11633,11 @@ "content": "Shehata, M., Abo-alez, R., Zaghlool, F., & Abou-kreisha, M. T. (2020). Deep Learning Based Vehicle Tracking in Traffic Management. IJCTT, 67(3), 5-8; Surya entry; Zhang et al. 2020 IOP Conference Series; Zhou et al. 
2020 JPCS.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "References" }, { - "attribute": "note", + "name": "note", "value": "Diversity of venues (IJCTT, Applied Sciences, IOP, JPCS)" } ] @@ -11648,11 +11648,11 @@ "content": "56", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Page footer" }, { - "attribute": "page", + "name": "page", "value": "56" } ] @@ -11669,19 +11669,19 @@ "content": "Asystem-levelanalysisindicatesthatdevicesbased on our architecture could achieve performance parity with GPUs on a simple image benchmark using approximately 10,000 times less energy.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Extropic Corporation research paper" }, { - "attribute": "date", + "name": "date", "value": "October 29, 2025" }, { - "attribute": "energy_efficiency_claim", + "name": "energy_efficiency_claim", "value": "10,000x improvement" }, { - "attribute": "benchmark_type", + "name": "benchmark_type", "value": "simple image benchmark" } ] @@ -11692,19 +11692,19 @@ "content": "Every year, U.S. firms spend an amount larger than the inflation-adjusted cost of the Apollo program on AI-focused data centers [1, 2]. By 2030, these data centers could consume 10% of all of the energy produced in the U.S. [3].", "attributes": [ { - "attribute": "source", + "name": "source", "value": "U.S. energy consumption projections" }, { - "attribute": "date", + "name": "date", "value": "2025 projection" }, { - "attribute": "energy_consumption", + "name": "energy_consumption", "value": "10% of U.S. energy by 2030" }, { - "attribute": "annual_spending", + "name": "annual_spending", "value": "Exceeds Apollo program cost" } ] @@ -11715,19 +11715,19 @@ "content": "Had a different style of hardware been popular in the last few decades, AI algorithms would have evolved in a completely different direction, and possibly a more energy-efficient one. This interplay between algorithm research and hardware availability is known as the 'Hardware Lottery' [19], and it entrenches hardware-algorithm pairings that may be far from optimal.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Research analysis" }, { - "attribute": "problem", + "name": "problem", "value": "Hardware Lottery" }, { - "attribute": "consequence", + "name": "consequence", "value": "Suboptimal algorithm evolution" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "Critical" } ] @@ -11738,19 +11738,19 @@ "content": "The mixing-expressivity tradeoff (MET) summarizes this issue with existing probabilistic computer architectures, reflecting the fact that modeling performance and sampling hardness are coupled for MEBMs. 
Specifically, as the expressivity (modeling performance) of an MEBM increases, its mixing time (the amount of computational effort needed to draw independent samples from the MEBM's distribution) becomes progressively longer, resulting in expensive inference and unstable training [52, 53].", "attributes": [ { - "attribute": "problem", + "name": "problem", "value": "Mixing-Expressivity Tradeoff (MET)" }, { - "attribute": "consequence", + "name": "consequence", "value": "Exponentially longer mixing times" }, { - "attribute": "impact", + "name": "impact", "value": "Expensive inference and unstable training" }, { - "attribute": "solution_type", + "name": "solution_type", "value": "Addressed by DTM approach" } ] @@ -11761,19 +11761,19 @@ "content": "To enable a near-term, large-scale realization of the DTCA, we leveraged the shot-noise dynamics of subthreshold transistors [45] to build an RNG that is fast, energy-efficient, and small. Our all-transistor RNG is programmable and has the desired sigmoidal response to a control voltage, as shown by experimental measurements in Fig. 4 (a).", "attributes": [ { - "attribute": "hardware_approach", + "name": "hardware_approach", "value": "All-transistor implementation" }, { - "attribute": "technology", + "name": "technology", "value": "Subthreshold transistor dynamics" }, { - "attribute": "scalability", + "name": "scalability", "value": "CMOS compatible" }, { - "attribute": "component_type", + "name": "component_type", "value": "RNG (Random Number Generator)" } ] @@ -11784,19 +11784,19 @@ "content": "At the top level, we introduce a new probabilistic computer architecture that runs Denoising Thermodynamic Models (DTMs) instead of monolithic EBMs. As their name suggests, rather than using the hardware's EBM to model data distributions directly, DTMs sequentially compose many hardware EBMs to model a process that denoises the data gradually. Diffusion models [18, 44] also follow this denoising procedure and are much more capable than EBMs. This key architectural change addresses a fundamental issue with previous approaches and represents the first scalable method for applying probabilistic hardware to machine learning.", "attributes": [ { - "attribute": "innovation", + "name": "innovation", "value": "First scalable probabilistic hardware approach" }, { - "attribute": "key_change", + "name": "key_change", "value": "Sequential EBM composition" }, { - "attribute": "benefit", + "name": "benefit", "value": "Avoids mixing-expressivity tradeoff" }, { - "attribute": "significance", + "name": "significance", "value": "Fundamental architectural breakthrough" } ] @@ -11807,19 +11807,19 @@ "content": "The DTM that produced the results shown in Fig. 1 used Boltzmann machine EBMs. Boltzmann machines, also known as Ising models in physics, use binary random variables and are the simplest type of discrete-variable EBM.\n\nBoltzmann machines are hardware efficient because the Gibbs sampling update rule required to sample from them is simple. 
Boltzmann machines implement energy functions of the form\nE(x) =−β \n⟨\n∑_{i≠j} x_i J_ij x_j + ∑_{i=1}^n h_i x_i\n⟩\n,(10)", "attributes": [ { - "attribute": "implementation", + "name": "implementation", "value": "Sparse Boltzmann machines" }, { - "attribute": "model_type", + "name": "model_type", "value": "Ising models/EBMs" }, { - "attribute": "efficiency_reason", + "name": "efficiency_reason", "value": "Simple Gibbs sampling" }, { - "attribute": "variable_type", + "name": "variable_type", "value": "Binary random variables" } ] @@ -11830,19 +11830,19 @@ "content": "In addition to these integration challenges, GPU performance per joule is doubling every few years [24], making it very difficult for cutting-edge computing schemes to gain mainstream adoption.", "attributes": [ { - "attribute": "challenge", + "name": "challenge", "value": "GPU efficiency improvement rate" }, { - "attribute": "rate", + "name": "rate", "value": "Doubling every few years" }, { - "attribute": "impact", + "name": "impact", "value": "Barriers to new adoption" }, { - "attribute": "context", + "name": "context", "value": "Competitive landscape analysis" } ] @@ -11853,19 +11853,19 @@ "content": "The modular nature of DTMs enables various hardware implementations. For example, each EBM in the chain can be implemented using distinct physical circuitry on the same chip, as shown in Fig. 3 (b). Alternatively, the various EBMs may be split across several communicating chips or implemented by the same hardware, reprogrammed with distinct sets of weights at different times.", "attributes": [ { - "attribute": "architecture", + "name": "architecture", "value": "Modular DTCA" }, { - "attribute": "implementation_options", + "name": "implementation_options", "value": "Multiple chip configurations" }, { - "attribute": "flexibility", + "name": "flexibility", "value": "Reprogrammable hardware" }, { - "attribute": "scalability", + "name": "scalability", "value": "Various deployment options" } ] @@ -11876,11 +11876,11 @@ "content": "The mixing-expressivity tradeoff (MET) summarizes this issue with existing probabilistic computer architectures, reflecting the fact that modeling performance and sampling hardness are coupled for MEBMs. Specifically, as the expressivity (modeling performance) of an MEBM increases, its mixing time (the amount of computational effort needed to draw independent samples from the MEBM's distribution) becomes progressively longer, resulting in expensive inference and unstable training [52, 53].", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Academic paper text" }, { - "attribute": "reference", + "name": "reference", "value": "[52, 53]" } ] @@ -11891,11 +11891,11 @@ "content": "DTMs merge EBMs with diffusion models, offering an alternative path for probabilistic computing that assuages the MET. DTMs are a slight generalization of recent work from deep learning practitioners that has pushed the frontier of EBM performance [57–60].", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Academic paper text" }, { - "attribute": "reference", + "name": "reference", "value": "[57–60]" } ] @@ -11906,11 +11906,11 @@ "content": "Denoising models attempt to reverse a process that gradually transforms the data distribution Q(x0) into simple noise. This forward process is given by the Markov chain Q(x0, . . . , xT ) = Q(x0) ∏t=1T Q(xt|xt−1). 
(3) The forward process is typically chosen such that it has a unique stationary distribution Q(xT ), which takes a simple form (e.g., Gaussian or uniform).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Academic paper text" }, { - "attribute": "equation", + "name": "equation", "value": "Eq. (3)" } ] @@ -11921,11 +11921,11 @@ "content": "The MET makes it clear that MEBMs have a flaw that makes them challenging and energetically costly to scale.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Author's assessment" }, { - "attribute": "perspective", + "name": "perspective", "value": "Critical" } ] @@ -11936,11 +11936,11 @@ "content": "Instead of trying to use a single EBM to model the data, DTMs chain many EBMs to gradually build up to the complexity of the data distribution. This gradual buildup of complexity allows the landscape of each EBM in the chain to remain relatively simple (and easy to sample) without limiting the complexity of the distribution modeled by the chain as a whole;", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Author's technical assessment" }, { - "attribute": "perspective", + "name": "perspective", "value": "Positive/optimistic" } ] @@ -11951,11 +11951,11 @@ "content": "For large differences in energy, like those encountered when trying to move between two valleys separated by a significant barrier, this probability can be very close to zero. These barriers grind the iterative sampler to a halt.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Technical description" }, { - "attribute": "metaphor", + "name": "metaphor", "value": "Valley/barrier analogy" } ] @@ -11966,11 +11966,11 @@ "content": "Reversal of the forward process is achieved by learning a set of distributions Pθ(xt−1|xt) that approximate the reversal of each conditional in Eq. (3). In doing so, we learn a map from simple noise to the data distribution, which can then be used to generate new data.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Methodological description" }, { - "attribute": "process", + "name": "process", "value": "Reverse diffusion" } ] @@ -11981,11 +11981,11 @@ "content": "In traditional diffusion models, the forward process is made to be sufficiently fine-grained (using a large num", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Comparative statement about traditional approaches" }, { - "attribute": "completeness", + "name": "completeness", "value": "Incomplete sentence" } ] @@ -11996,15 +11996,15 @@ "content": "ber of stepsT) such that the conditional distribution of each step in the reverse process takes some simple form (such as Gaussian or categorical). This simple distribution is parameterized by a neural network, which is then trained to minimize the Kullback-Leibler (KL) divergence between the joint distributionsQandP θ, LDN (θ) =D Q(x0, . . . , xT ) Pθ(x0, . . . , xT ) ,(4)", "attributes": [ { - "attribute": "method", + "name": "method", "value": "traditional diffusion models" }, { - "attribute": "objective", + "name": "objective", "value": "minimize KL divergence" }, { - "attribute": "section", + "name": "section", "value": "model training" } ] @@ -12015,11 +12015,11 @@ "content": "where the joint distribution of the model is the product of the learned conditionals: Pθ(x0, . . . 
, xT ) =Q(x T ) TY t=1 Pθ(xt−1|xt).(5)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(5)" }, { - "attribute": "section", + "name": "section", "value": "joint distribution" } ] @@ -12030,15 +12030,15 @@ "content": "In many cases, it is straight- forward to re-cast the forward process in an exponential form, Q(xt|xt−1)∝e −Ef t−1(xt−1,xt),(6)", "attributes": [ { - "attribute": "method", + "name": "method", "value": "EBM re-casting" }, { - "attribute": "equation", + "name": "equation", "value": "(6)" }, { - "attribute": "section", + "name": "section", "value": "EBM approach" } ] @@ -12049,15 +12049,15 @@ "content": "To maximally leverage probabilistic hardware for EBM sampling, DTMs generalize Eq. (7) by introducing latent variables{z t}: Pθ(xt−1|xt)∝ X zt−1 e−(Ef t−1(xt−1,xt)+Eθ t−1(xt−1,zt−1,θ)). (8) Introducing latent variables allows the size and complexity of the probabilistic model to be increased independently of the data dimension.", "attributes": [ { - "attribute": "method", + "name": "method", "value": "DTM generalization" }, { - "attribute": "equation", + "name": "equation", "value": "(8)" }, { - "attribute": "innovation", + "name": "innovation", "value": "latent variables" } ] @@ -12068,15 +12068,15 @@ "content": "A convenient property of DTMs is that if the ap- proximation to the reverse-process conditional is exact (Pθ(xt−1|xt)→Q(x t−1|xt)), one also learns the marginal distribution att−1, Q(xt−1)∝ X zt−1 e−Eθ t−1(xt−1,zt−1,θ).(9)", "attributes": [ { - "attribute": "property", + "name": "property", "value": "marginal learning" }, { - "attribute": "equation", + "name": "equation", "value": "(9)" }, { - "attribute": "condition", + "name": "condition", "value": "exact reverse-process approximation" } ] @@ -12087,15 +12087,15 @@ "content": "As the number of steps in the forward process is increased, the effect of each noising step becomes smaller, meaning that Ef t−1 more tightly bindsx t tox t−1. This binding can simplify the distribution given in Eq. (7)... As illustrated in Fig. 3 (a), models of the form given in Eq. (7) reshape simple noise into an approximation of the data distribution. IncreasingTwhile holding the EBM architecture constant simultaneously increases the expressive power of the chain and makes each step easier to sample from, entirely bypassing the MET.", "attributes": [ { - "attribute": "benefit", + "name": "benefit", "value": "increased expressive power" }, { - "attribute": "advantage", + "name": "advantage", "value": "easier sampling" }, { - "attribute": "figure", + "name": "figure", "value": "Fig. 3(a)" } ] @@ -12106,15 +12106,15 @@ "content": "The Denoising Thermodynamic Computer Architec- ture (DTCA) tightly integrates DTMs into probabilistic hardware, allowing for the highly efficient implementa- (b)A sketch of how a chip based on the DTCA chains hard- ware EBMs to approximate the reverse process. 
Each EBM is implemented by distinct circuitry, parts of which are dedicated to receiving the inputs and conditionally sampling the outputs and latents.", "attributes": [ { - "attribute": "architecture", + "name": "architecture", "value": "DTCA" }, { - "attribute": "implementation", + "name": "implementation", "value": "probabilistic hardware" }, { - "attribute": "hardware_component", + "name": "hardware_component", "value": "distinct circuitry per EBM" } ] @@ -12125,15 +12125,15 @@ "content": "Practical implementations of the DTCA utilize natural-to-implement EBMs that exhibit sparse and local connectivity, as is typical in the literature [33]. This constraint allows sampling of the EBM to be performed by massively parallel arrays of primitive circuitry that implement Gibbs sampling.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "DTCA architecture description" }, { - "attribute": "technical_approach", + "name": "technical_approach", "value": "hardware implementation" }, { - "attribute": "reference", + "name": "reference", "value": "[33]" } ] @@ -12144,15 +12144,15 @@ "content": "A key feature of the DTCA is thatEf t−1 can be implemented efficiently using our constrained EBMs. Specifically, for both continuous and discrete diffusion,E f t−1 can be implemented using a single pairwise interaction between corresponding variables inxt andx t−1; see Ap- pendix A.1 and C.1 for details. This structure can be reflected in how the chip is laid out to implement these interactions without violating locality constraints. Critically, Eq. (8) places no constraints on the form ofE θ t−1. Therefore, we are free to use EBMs that our hardware implements especially efficiently.", "attributes": [ { - "attribute": "technical_approach", + "name": "technical_approach", "value": "algorithm efficiency" }, { - "attribute": "constraint_type", + "name": "constraint_type", "value": "pairwise interactions" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. (8)" } ] @@ -12163,19 +12163,19 @@ "content": "To understand the performance of a future hardware device, we developed a GPU simulator of the DTCA and used it to train a DTM on the Fashion-MNIST dataset. We measure the performance of the DTM using FID and utilize a physical model to estimate the energy required to generate new images. These numbers can be compared to conventional algorithm/hardware pairings, such as a VAE running on a GPU; these results are shown in Fig. 1.", "attributes": [ { - "attribute": "evaluation_method", + "name": "evaluation_method", "value": "GPU simulation" }, { - "attribute": "dataset", + "name": "dataset", "value": "Fashion-MNIST" }, { - "attribute": "metrics", + "name": "metrics", "value": "FID, energy consumption" }, { - "attribute": "comparison_baselines", + "name": "comparison_baselines", "value": "VAE on GPU" } ] @@ -12186,19 +12186,19 @@ "content": "The DTM that produced the results shown in Fig. 1 used Boltzmann machine EBMs. Boltzmann machines, also known as Ising models in physics, use binary random variables and are the simplest type of discrete-variable EBM. 
Boltzmann machines implement energy functions of the form E(x) =−β 〈X i̸=j xiJijxj + X i=1 hixi 〉,(10) where eachx i ∈ {−1,1}.", "attributes": [ { - "attribute": "model_type", + "name": "model_type", "value": "Boltzmann machine" }, { - "attribute": "variable_type", + "name": "variable_type", "value": "binary random variables" }, { - "attribute": "alternative_name", + "name": "alternative_name", "value": "Ising models" }, { - "attribute": "equation_reference", + "name": "equation_reference", "value": "Eq. (10)" } ] @@ -12209,19 +12209,19 @@ "content": "The Gibbs sampling update rule for sampling from the corresponding EBM is P(X i[k+ 1] = +1|X[k] =x) =σ 〈2β 〈X j̸=i Jij xj+hi 〉〉, (11) which can be evaluated simply using an appropriately biased source of random bits.", "attributes": [ { - "attribute": "algorithm", + "name": "algorithm", "value": "Gibbs sampling" }, { - "attribute": "probability_function", + "name": "probability_function", "value": "sigmoidal" }, { - "attribute": "implementation_medium", + "name": "implementation_medium", "value": "random bits" }, { - "attribute": "equation_reference", + "name": "equation_reference", "value": "Eq. (11)" } ] @@ -12232,19 +12232,19 @@ "content": "Specifically, the EBMs employed in this work were sparse, deep Boltzmann machines comprisingL×Lgrids of binary variables, whereL= 70was used in most cases. Eachvariablewasconnectedtoseveral(inmostcases, 12) of its neighbors following a simple pattern. At random, some of the variables were selected to represent the data xt−1, and the rest were assigned to the latent variables zt−1. Then, an extra node was connected to each data node to implement the coupling toxt.", "attributes": [ { - "attribute": "architecture", + "name": "architecture", "value": "sparse, deep Boltzmann machines" }, { - "attribute": "grid_size", + "name": "grid_size", "value": "L×L, L=70" }, { - "attribute": "connectivity", + "name": "connectivity", "value": "12 neighbors typically" }, { - "attribute": "variable_types", + "name": "variable_types", "value": "data nodes, latent variables" } ] @@ -12255,23 +12255,23 @@ "content": "To enable a near-term, large-scale realization of the DTCA, we leveraged the shot-noise dynamics of sub- threshold transistors [45] to build an RNG that is fast, energy-efficient, and small. Our all-transistor RNG is programmable and has the desired sigmoidal response to a control voltage, as shown by experimental measurements in Fig. 4 (a). The stochastic voltage signal output from the RNG has an approximately exponential autocorrelation function that decays in around100ns, as il- lustrated in Fig. 4 (b).", "attributes": [ { - "attribute": "hardware_component", + "name": "hardware_component", "value": "RNG" }, { - "attribute": "implementation_technology", + "name": "implementation_technology", "value": "subthreshold transistors" }, { - "attribute": "properties", + "name": "properties", "value": "fast, energy-efficient, small, programmable" }, { - "attribute": "response_characteristic", + "name": "response_characteristic", "value": "sigmoidal" }, { - "attribute": "correlation_time", + "name": "correlation_time", "value": "~100ns" } ] @@ -12282,15 +12282,15 @@ "content": "The modular nature of DTMs enables various hardware implementations. For example, each EBM in the chain can be implemented using distinct physical circuitry on the same chip, as shown in Fig. 3 (b). 
Alternatively, the various EBMs may be split across several communicating chips or implemented by the same hardware, reprogrammed with distinct sets of weights at different times.", "attributes": [ { - "attribute": "design_approach", + "name": "design_approach", "value": "modular architecture" }, { - "attribute": "flexibility", + "name": "flexibility", "value": "multiple implementation options" }, { - "attribute": "hardware_options", + "name": "hardware_options", "value": "distinct circuits, split chips, reprogrammable" } ] @@ -12301,15 +12301,15 @@ "content": "A practical advantage to our all-transistor RNG is that detailedandprovenfoundry-providedmodelscanbeused to study the effect of manufacturing variations on our", "attributes": [ { - "attribute": "design_advantage", + "name": "design_advantage", "value": "manufacturing variability analysis" }, { - "attribute": "model_availability", + "name": "model_availability", "value": "foundry-provided models" }, { - "attribute": "optimization_approach", + "name": "optimization_approach", "value": "systematic design" } ] @@ -12320,19 +12320,19 @@ "content": "Due to our chosen connectivity patterns, our Boltz-mann machines are bipartite (two-colorable). Since each color block can be sampled in parallel, a single itera- tion of Gibbs sampling corresponds to sampling the first colorblockconditionedonthesecondandthenviceversa. Starting from some random initialization, this block sampling procedure could then be repeated forKiterations (whereKis longer than the mixing time of the sampler, typicallyK≈1000) to draw samples from Eq. (7) for each step in the approximation to the reverse process.", "attributes": [ { - "attribute": "sampling_method", + "name": "sampling_method", "value": "block sampling" }, { - "attribute": "parallelization", + "name": "parallelization", "value": "bipartite color blocks" }, { - "attribute": "iterations", + "name": "iterations", "value": "K≈1000" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. (7)" } ] @@ -12343,15 +12343,15 @@ "content": "Practical implementations of the DTCA utilize natural-to-implement EBMs that exhibit sparse and local connectivity, as is typical in the literature [33]. This constraint allows sampling of the EBM to be performed by massively parallel arrays of primitive circuitry that implement Gibbs sampling.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Literature [33]" }, { - "attribute": "implementation_type", + "name": "implementation_type", "value": "Hardware circuitry" }, { - "attribute": "sampling_method", + "name": "sampling_method", "value": "Gibbs sampling" } ] @@ -12362,15 +12362,15 @@ "content": "A key feature of the DTCA is thatEf t−1 can be implemented efficiently using our constrained EBMs. Specifically, for both continuous and discrete diffusion,E f t−1 can be implemented using a single pairwise interaction between corresponding variables inxt andx t−1.", "attributes": [ { - "attribute": "feature", + "name": "feature", "value": "DTCA efficiency" }, { - "attribute": "diffusion_types", + "name": "diffusion_types", "value": "Continuous and discrete" }, { - "attribute": "implementation", + "name": "implementation", "value": "Pairwise interaction" } ] @@ -12381,11 +12381,11 @@ "content": "At the lowest level, this corresponds to high-dimensional, regularly structured latent variable EBM. 
If more powerful models are desired, these hardware latent-variable EBMs can be arbitrarily scaled by combining them into software-defined graphical models.", "attributes": [ { - "attribute": "scalability", + "name": "scalability", "value": "Arbitrary scaling" }, { - "attribute": "model_type", + "name": "model_type", "value": "Graphical models" } ] @@ -12396,11 +12396,11 @@ "content": "The modular nature of DTMs enables various hardware implementations. For example, each EBM in the chain can be implemented using distinct physical circuitry on the same chip, as shown in Fig. 3 (b). Alternatively, the various EBMs may be split across several communicating chips or implemented by the same hardware, reprogrammed with distinct sets of weights at different times.", "attributes": [ { - "attribute": "modularity", + "name": "modularity", "value": "Various implementations" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 3 (b)" } ] @@ -12411,15 +12411,15 @@ "content": "To understand the performance of a future hardware device, we developed a GPU simulator of the DTCA and used it to train a DTM on the Fashion-MNIST dataset. We measure the performance of the DTM using FID and utilize a physical model to estimate the energy required to generate new images. These numbers can be compared to conventional algorithm/hardware pairings, such as a VAE running on a GPU; these results are shown in Fig. 1.", "attributes": [ { - "attribute": "dataset", + "name": "dataset", "value": "Fashion-MNIST" }, { - "attribute": "performance_metric", + "name": "performance_metric", "value": "FID" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 1" } ] @@ -12430,15 +12430,15 @@ "content": "Boltzmann machines are hardware efficient because the Gibbs sampling update rule required to sample from them is simple. Boltzmann machines implement energy functions of the form E(x) =−β⟨∑i̸=j xiJijxj + ∑i=1 hixi⟩,(10), where eachx i ∈ {−1,1}.", "attributes": [ { - "attribute": "efficiency_reason", + "name": "efficiency_reason", "value": "Simple Gibbs sampling" }, { - "attribute": "variable_type", + "name": "variable_type", "value": "Binary" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. (10)" } ] @@ -12449,15 +12449,15 @@ "content": "Implementing our proposed hardware architecture using Boltzmann machines is particularly simple. A device will consist of a regular grid of Bernoulli sampling circuits, where each sampling circuit implements the Gibbs sampling update for a single variablex i. The bias of the sampling circuits (probability that it produces 1 as opposed to−1) is constrained to be a sigmoidal function of an input voltage, allowing the conditional update given in Eq. (11) to be implemented using a simple circuit that adds currents such as a resistor network.", "attributes": [ { - "attribute": "circuit_type", + "name": "circuit_type", "value": "Bernoulli sampling circuits" }, { - "attribute": "bias_control", + "name": "bias_control", "value": "Sigmoidal function" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. (11)" } ] @@ -12468,15 +12468,15 @@ "content": "Due to our chosen connectivity patterns, our Boltzmann machines are bipartite (two-colorable). Since each color block can be sampled in parallel, a single iteration of Gibbs sampling corresponds to sampling the first colorblockconditiononthesecondandthenviceversa. 
Starting from some random initialization, this block sampling procedure could then be repeated forKiterations (whereKis longer than the mixing time of the sampler, typicallyK≈1000) to draw samples from Eq. (7) for each step in the approximation to the reverse process.", "attributes": [ { - "attribute": "grid_size", + "name": "grid_size", "value": "70×70" }, { - "attribute": "connections", + "name": "connections", "value": "12 neighbors" }, { - "attribute": "iterations", + "name": "iterations", "value": "K≈1000" } ] @@ -12487,15 +12487,15 @@ "content": "To enable a near-term, large-scale realization of the DTCA, we leveraged the shot-noise dynamics of subthreshold transistors [45] to build an RNG that is fast, energy-efficient, and small. Our all-transistor RNG is programmable and has the desired sigmoidal response to a control voltage, as shown by experimental measurements in Fig. 4 (a). The stochastic voltage signal output from the RNG has an approximately exponential autocorrelation function that decays in around100ns, as illustrated in Fig. 4 (b).", "attributes": [ { - "attribute": "technology", + "name": "technology", "value": "Subthreshold transistors [45]" }, { - "attribute": "performance", + "name": "performance", "value": "100ns decay" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 4 (a), (b)" } ] @@ -12506,11 +12506,11 @@ "content": "Critically, Eq. (8) places no constraints on the form ofE θ t−1. Therefore, we are free to use EBMs that our hardware implements especially efficiently.", "attributes": [ { - "attribute": "advantage", + "name": "advantage", "value": "Hardware efficiency" }, { - "attribute": "constraint", + "name": "constraint", "value": "None on Eθ t−1" } ] @@ -12521,11 +12521,11 @@ "content": "To enable a near-term, large-scale realization of the DTCA, we leveraged the shot-noise dynamics of subthreshold transistors [45] to build an RNG that is fast, energy-efficient, and small.", "attributes": [ { - "attribute": "strategy", + "name": "strategy", "value": "Near-term realization" }, { - "attribute": "component", + "name": "component", "value": "RNG implementation" } ] @@ -12536,11 +12536,11 @@ "content": "Refer to Appendices B and C for a further theoretical discussion of the hardware architecture. See Appendix D.1. Appendix C provides further details on the Boltzmann machine architecture. Appendix J provides further details about our RNG.", "attributes": [ { - "attribute": "documentation", + "name": "documentation", "value": "Multiple appendices" }, { - "attribute": "topics", + "name": "topics", "value": "Theoretical discussion, architecture details" } ] @@ -12551,15 +12551,15 @@ "content": "We measure the performance of the DTM using FID and utilize a physical model to estimate the energy required to generate new images. These numbers can be compared to conventional algorithm/hardware pairings, such as a VAE running on a GPU; these results are shown in Fig. 1.", "attributes": [ { - "attribute": "evaluation_metrics", + "name": "evaluation_metrics", "value": "FID, Energy consumption" }, { - "attribute": "baseline", + "name": "baseline", "value": "VAE on GPU" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 1" } ] @@ -12570,11 +12570,11 @@ "content": "FIG. 4.A programmable source of random bits. (a)A laboratory measurement of the operating characteristic of our RNG. The probability of the output voltage signal being in the high state (x= 1) can be programmed by varying an input voltage. 
The relationship betweenP(x= 1)and the input voltage is well-approximated by a sigmoid function.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Fig. 4(a)" }, { - "attribute": "component", + "name": "component", "value": "RNG operating characteristics" } ] @@ -12585,15 +12585,15 @@ "content": "(b)The autocorrelation function of the RNG at the unbiased point (P(x= 1) = 0.5). The decay is approximately exponential with the rateτ0 ≈100ns.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Fig. 4(b)" }, { - "attribute": "measurement", + "name": "measurement", "value": "autocorrelation decay" }, { - "attribute": "value", + "name": "value", "value": "τ0 ≈ 100ns" } ] @@ -12604,15 +12604,15 @@ "content": "(c)Estimating the effect of manufacturing variation on RNG performance. Each point in the plot represents the results of a simulation of an RNG circuit with transistor parameters sampled according to a procedure defined by the manufacturer's PDK. Each color represents a different process corner, each for which∼200realizations of the RNG were simulated. The \"typical\" corner represents a balanced case, whereas the other two are asymmetric corners where the two types of transistors (NMOS and PMOS) are skewed in opposite directions. The slow NMOS and fast PMOS case is worst performing for us due to an asymmetry in our design.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Fig. 4(c)" }, { - "attribute": "analysis_type", + "name": "analysis_type", "value": "manufacturing variation" }, { - "attribute": "finding", + "name": "finding", "value": "Reliable across process corners" } ] @@ -12623,15 +12623,15 @@ "content": "The energy estimates given in Fig. 1 for the probabilistic computer were constructed using a physical model of an all-transistor Boltzmann machine Gibbs sampler. The dominant contributions to this model are captured by the formula E=T KmixL2Ecell,(12) Ecell =E rng +E bias +E clock +E comm,(13) whereE rng comes from the data in Fig. 4 (c).", "attributes": [ { - "attribute": "model_type", + "name": "model_type", "value": "Boltzmann machine Gibbs sampler" }, { - "attribute": "components", + "name": "components", "value": "RNG, bias, clock, communication" }, { - "attribute": "equation", + "name": "equation", "value": "Ecell = E_rng + E_bias + E_clock + E_comm" } ] @@ -12642,15 +12642,15 @@ "content": "Generally, given the same transistor process we used for our RNG and some reasonable selections for other free parameters of the model, we can estimate Ecell ≈2fJ. See Appendix D for an exhaustive derivation of this model.", "attributes": [ { - "attribute": "value", + "name": "value", "value": "Ecell ≈ 2fJ" }, { - "attribute": "derivation", + "name": "derivation", "value": "Physical model" }, { - "attribute": "source", + "name": "source", "value": "Appendix D" } ] @@ -12661,15 +12661,15 @@ "content": "We use a simple model for the energy consumption of the GPU that underestimates the actual values. 
We compute the total number of floating-point operations (FLOPs) required to generate a sample from the trained model and divide that by the FLOP/joule specification given by the manufacturer.", "attributes": [ { - "attribute": "method", + "name": "method", "value": "FLOPs / manufacturer specification" }, { - "attribute": "component", + "name": "component", "value": "GPU energy estimation" }, { - "attribute": "reference", + "name": "reference", "value": "Appendix E" } ] @@ -12680,15 +12680,15 @@ "content": "The EBMs used in the experiments presented in Fig. 1 were trained by applying the standard Monte-Carlo estimator for the gradients of EBMs [61] to Eq. (4), which yields ∇θLDN (θ)= TX t=1 EQ(xt−1,xt) [EPθ(zt−1|xt−1,xt) [∇θEm t−1 ] −EPθ(xt−1,zt−1|xt) [∇θEm t−1 ] ] . (14) Notably, each term in the sum overtcan be computed independently.", "attributes": [ { - "attribute": "method", + "name": "method", "value": "Monte-Carlo gradient estimation" }, { - "attribute": "equation", + "name": "equation", "value": "Eq. (14)" }, { - "attribute": "characteristic", + "name": "characteristic", "value": "Independent term computation" } ] @@ -12699,11 +12699,11 @@ "content": "It should be noted that the DTCA allows our EBMs to have finite and short mixing times, which enables sufficient sampling iterations to be used to achieve nearly unbiased estimates of the gradient. Unbiased gradient estimates are not possible for MEBMs in most cases due to their long mixing times [62].", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "reference", + "name": "reference", "value": "[62]" } ] @@ -12714,15 +12714,15 @@ "content": "DTMs alleviate the training instability that is fundamental to MEBMs... An example of the training dynamics for several different types of models is shown in Fig. 5 (b)... Complementing DTMs with the ACP completely stabilizes training.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 5(b)" }, { - "attribute": "method", + "name": "method", "value": "DTM + ACP" } ] @@ -12733,11 +12733,11 @@ "content": "However, as these gradients are followed, the MEBM is reshaped according to the data distribution and begins to become complex and multimodal. This induced multimodality greatly increases the sampling complexity of the distribution, causing samples to deviate from equilibrium. Gradients computed using non-equilibrium samples do not necessarily point in a meaningful direction, which can halt or, in some cases, even reverse the training process.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "problem", + "name": "problem", "value": "training instability" } ] @@ -12748,15 +12748,15 @@ "content": "The lower plot in Fig. 5 (b) shows the autocorrelation at a delay equal to the total number of sampling iterations used to estimate the gradients during training. Generally, if r_yy is close to 1, gradients were estimated using far-from-equilibrium samples and were likely of low quality. If it is close to zero, the samples should be close to equilibrium and produce high-quality gradient estimates.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "metric", + "name": "metric", "value": "normalized autocorrelation r_yy[k]" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. 
(15), (16)" } ] @@ -12767,11 +12767,11 @@ "content": "Denoising alone significantly stabilizes training. Because the transformation carried out by each layer is simpler, the distribution that the model must learn is less complex and, therefore, easier to sample from.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "method", + "name": "method", "value": "denoising" } ] @@ -12782,15 +12782,15 @@ "content": "The effect of scaling EBM complexity on DTM performance. The grid size L was modified to change the number of latent variables compared to the (fixed) number of data variables. Generally, EBM layers with more connectivity and longer allowed mixing times can utilize more latent variables and, therefore, achieve higher performance.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "text" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 5(c)" }, { - "attribute": "dataset", + "name": "dataset", "value": "Fashion-MNIST" } ] @@ -12801,11 +12801,11 @@ "content": "As training progresses, the DTM eventually becomes unstable, which can be attributed to the development of a complex energy landscape among the latent variables.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Training Stability" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -12816,11 +12816,11 @@ "content": "We add a term to the loss function that nudges the optimization towards a distribution that is easy to sample from", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Training Procedure" }, { - "attribute": "method", + "name": "method", "value": "Total Correlation Penalty" } ] @@ -12831,11 +12831,11 @@ "content": "The total loss function is the sum of Eq. (4) and this total correlation penalty: L=L DN + Σ_{t=1}^{T} λtLTC t", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Loss Function" }, { - "attribute": "equation", + "name": "equation", "value": "(18)" } ] @@ -12846,11 +12846,11 @@ "content": "We use an Adaptive Correlation Penalty (ACP) to set the λt as large as necessary to keep sampling tractable for each layer", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Adaptive Control" }, { - "attribute": "method", + "name": "method", "value": "ACP" } ] @@ -12861,15 +12861,15 @@ "content": "As shown in Fig. 1, increasing the depth of the DTM from 2 to 8 substantially improves the quality of generated images", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Scaling Analysis" }, { - "attribute": "figure", + "name": "figure", "value": "Fig. 
1" }, { - "attribute": "improvement", + "name": "improvement", "value": "substantial" } ] @@ -12880,11 +12880,11 @@ "content": "which demonstrates that larger values of K are required to support wider models holding connectivity constant", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Scaling Constraints" }, { - "attribute": "parameter", + "name": "parameter", "value": "K" } ] @@ -12895,11 +12895,11 @@ "content": "we hypothesize that the correct way to scale probabilistic machine learning hardware systems is not in isolation but rather as a component in a larger hybrid thermodynamic-deterministic machine learning (HTDML) system", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Conclusion" }, { - "attribute": "hypothesis", + "name": "hypothesis", "value": "HTDML scaling approach" } ] @@ -12910,11 +12910,11 @@ "content": "It would be naive to expect that a hardware-efficient EBM topology can be scaled in isolation to model arbitrarily complex datasets", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Scaling Limitations" }, { - "attribute": "confidence", + "name": "confidence", "value": "qualified" } ] @@ -12925,11 +12925,11 @@ "content": "A hybrid approach is sensible because there is no a priori reason to believe that a probabilistic computer should handle every part of a machine learning problem, and sometimes a deterministic processor is likely a better tool for the job", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Hybrid Approach Rationale" }, { - "attribute": "rationale", + "name": "rationale", "value": "task-specific suitability" } ] @@ -12940,15 +12940,15 @@ "content": "Model quality increases monotonically, and the autocorrelation stays small throughout training. This closed-loop control of the correlation penalty was employed during the training of most models used to produce the results in this article", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Training Dynamics" }, { - "attribute": "policy", + "name": "policy", "value": "closed-loop control" }, { - "attribute": "figure", + "name": "figure", "value": "Fig. 
5 (b)" } ] @@ -12959,15 +12959,15 @@ "content": "Mathematically, the landscape of HTDML may be summarized as Etot(S, D, p) =Edet(S, D, p) +Eprob(S, D, p)", "attributes": [ { - "attribute": "section", + "name": "section", "value": "HTDML Formulation" }, { - "attribute": "equation", + "name": "equation", "value": "(19)" }, { - "attribute": "components", + "name": "components", "value": "deterministic + probabilistic" } ] @@ -12978,15 +12978,15 @@ "content": "The DTM is trained to generate CIFAR-10 images and achieves performance parity with a traditional GAN using a∼10×smaller deterministic neural network.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Figure 6 description" }, { - "attribute": "dataset", + "name": "dataset", "value": "CIFAR-10" }, { - "attribute": "comparison", + "name": "comparison", "value": "DTM vs traditional GAN" } ] @@ -12997,15 +12997,15 @@ "content": "Indeed, binarization is not viable in general, and embedding into richer types of variables (such as categorical) at the probabilistic hardware level is not particularly efficient or principled.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Technical analysis section" }, { - "attribute": "method", + "name": "method", "value": "binarization critique" }, { - "attribute": "scope", + "name": "scope", "value": "general applicability" } ] @@ -13016,15 +13016,15 @@ "content": "One major flaw with our method is that the autoencoder and DTM are not jointly trained, which means that the embedding learned by the autoencoder may not be well-suited to the way information can flow in the DTM, given its limited connectivity.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Analysis of embedding method" }, { - "attribute": "limitation", + "name": "limitation", "value": "joint training not implemented" }, { - "attribute": "hardware_constraint", + "name": "hardware_constraint", "value": "DTM limited connectivity" } ] @@ -13035,15 +13035,15 @@ "content": "Based on the size of our RNG, it can be estimated that∼10 6 sampling cells could be fit into a6×6µm chip (see Appendix J).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Scalability analysis" }, { - "attribute": "chip_size", + "name": "chip_size", "value": "6×6µm" }, { - "attribute": "capacity", + "name": "capacity", "value": "~10^6 sampling cells" } ] @@ -13054,15 +13054,15 @@ "content": "In contrast, the largest DTM shown in Fig. 1 would use only around 50,000 cells.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Scalability comparison" }, { - "attribute": "current_model_size", + "name": "current_model_size", "value": "~50,000 cells" }, { - "attribute": "potential_capacity", + "name": "potential_capacity", "value": "~10^6 cells" } ] @@ -13073,15 +13073,15 @@ "content": "Like many engineered systems, optimal solutions will be found somewhere in the middle, where the contributions from the various subsystems are nearly balanced [65–67].", "attributes": [ { - "attribute": "source", + "name": "source", "value": "System design philosophy" }, { - "attribute": "approach", + "name": "approach", "value": "balanced subsystem design" }, { - "attribute": "reference", + "name": "reference", "value": "[65-67]" } ] @@ -13092,15 +13092,15 @@ "content": "One difficulty with HTDML research is that simulating large hardware EBMs on GPUs can be a challenging task. 
GPUs run these EBMs much less efficiently than probabilistic computers and the sparse data structures that naturally arise when working with hardware EBMs do not mesh well with regular tensor data types.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Research challenges" }, { - "attribute": "platform", + "name": "platform", "value": "GPU simulation" }, { - "attribute": "issue", + "name": "issue", "value": "sparse data structures vs regular tensors" } ] @@ -13111,15 +13111,15 @@ "content": "We have both short and long-term solutions to these challenges. To address these challenges in the short term, we have open-sourced a software library [69] that enables XLA-accelerated [70] simulation of hardware EBMs. This library is written in JAX [71] and automates the complex slicing operations that enable hardware EBM sampling.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Software solution" }, { - "attribute": "technology", + "name": "technology", "value": "JAX with XLA acceleration" }, { - "attribute": "availability", + "name": "availability", "value": "open-sourced" } ] @@ -13130,19 +13130,19 @@ "content": "10\n[1] A. A. Chien, Commun. ACM66, 5 (2023).\n[2] D. D. Stine,The Manhattan Project, the Apollo Program,\nand Federal Energy Technology R&D Programs: A Com-\nparative Analysis, Report RL34645 (Congressional Re-\nsearch Service, Washington, D.C., 2009).\n[3] J. Aljbour, T. Wilson, and P. Patel, EPRI White Paper\nno. 3002028905 (2024).\n[4] Y. Li, D. Choi, J. Chung, N. Kushman, J. Schrittwieser,\nR. Leblond, T. Eccles, J. Keeling, F. Gimeno, A. D.\nLago, T. Hubert, P. Choy, C. de Masson d'Autume,\nI. Babuschkin, X. Chen, P.-S. Huang, J. Welbl, S. Gowal,\nA. Cherepanov, J. Molloy, D. J. Mankowitz, E. S. Rob-\nson, P. Kohli, N. de Freitas, K. Kavukcuoglu, and\nO. Vinyals, Science378, 1092 (2022).\n[5] D. M. Katz, M. J. Bommarito, S. Gao, and P. Arredondo,\nPhilos. Trans. R. Soc. A382, 20230254 (2024).\n[6] H. Nori, N. King, S. M. McKinney, D. Carignan, and\nE. Horvitz, arXiv [cs.CL] (2023).\n[7] S. Noy and W. Zhang, Science381, 187 (2023).\n[8] E. Brynjolfsson, D. Li, and L. Raymond, Q. J. Econ.\n10.1093/qje/qjae044 (2025).\n[9] S. Peng, E. Kalliamvakou, P. Cihon, and M. Demirer,\narXiv [cs.SE] (2023).\n[10] A. Bick, A. Blandin, and D. J. Deming, The rapid adop-\ntion of generative ai, Tech. Rep. (National Bureau of Eco-\nnomic Research, 2024).\n[11] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit,\nL. Jones, A. N. Gomez, L. u. Kaiser, and I. Polosukhin,\ninAdvances in Neural Information Processing Systems,\nVol. 30, edited by I. Guyon, U. V. Luxburg, S. Bengio,\nH. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett\n(Curran Associates, Inc., 2017).\n[12] A. Coates, B. Huval, T. Wang, D. J. Wu, A. Y. Ng,\nand B. Catanzaro, inProceedings of the 30th Interna-\ntional Conference on International Conference on Ma-\nchine Learning - Volume 28, ICML'13 (JMLR.org, 2013)\np. III–1337–III–1345.\n[13] K. Chellapilla, S. Puri, and P. Simard, inTenth Inter-\nnational Workshop on Frontiers in Handwriting Recogni-\ntion, edited by G. Lorette, Université de Rennes 1 (Su-\nvisoft, La Baule (France), 2006).\n[14] H. Xiao, K. Rasul, and R. Vollgraf, arXiv [cs.LG] (2017).\n[15] M. Heusel, H. Ramsauer, T. Unterthiner, B. Nessler,\nand S. Hochreiter, inAdvances in Neural Information Pro-\ncessing Systems, Vol. 30, edited by I. Guyon, U. V.\nLuxburg, S. Bengio, H. Wallach, R. Fergus, S. Vish-\nwanathan, and R. 
Garnett (Curran Associates, Inc.,\n2017).\n[16] D.P.KingmaandM.Welling,Auto-Encoding Variational\nBayes(2022).\n[17] I. J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu,\nD. Warde-Farley, S. Ozair, A. Courville, and Y. Ben-\nbio, inAdvances in Neural Information Processing Sys-\ntems, Vol. 27, edited by Z. Ghahramani, M. Welling,\nC. Cortes, N. Lawrence, and K. Weinberger (Curran As-\nsociates, Inc., 2014).\n[18] J. Sohl-Dickstein, E. Weiss, N. Maheswaranathan, and\nS. Ganguli, inProceedings of the 32nd International Con-\nference on Machine Learning, Proceedings of Machine\nLearning Research, Vol. 37, edited by F. Bach and D. Blei\n(PMLR, Lille, France, 2015) pp. 2256–2265.\n[19] S. Hooker, Commun. ACM64, 58–65 (2021).\n[20] S. Ambrogio, P. Narayanan, A. Okazaki, A. Fasoli,\nC. Mackin, K. Hosokawa, A. Nomura, T. Yasuda,\nA. Chen, A. Friz,et al., Nature620, 768 (2023).\n[21] S. Bandyopadhyay, A. Sludds, S. Krastanov, R. Hamerly,\nN. Harris, D. Bunandar, M. Streshinsky, M. Hochberg,\nand D. Englund, Nat. Photon.18, 1335 (2024).\n[22] H. A. Gonzalez, J. Huang, F. Kelber, K. K. Nazeer,\nT.Langer, C.Liu, M.Lohrmann, A.Rostami, M.Schone,\nB. Vogginger,et al., arXiv [cs.ET] (2024).\n[23] S. B. Shrestha, J. Timcheck, P. Frady, L. Campos-\nMacias, and M. Davies, inICASSP 2024 - 2024 IEEE\nInternational Conference on Acoustics, Speech and Sig-\nnal Processing (ICASSP)(2024) pp. 13481–13485.\n[24] Y. Sun, N. B. Agostini, S. Dong, and D. Kaeli, arXiv\n[cs.DC] (2019).\n[25] Y. Song and S. Ermon, inAdvances in Neural Informa-\ntion Processing Systems, Vol. 32, edited by H. Wallach,\nH. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox,\nand R. Garnett (Curran Associates, Inc., 2019).\n[26] M. Janner, Y. Du, J. Tenenbaum, and S. Levine, inInter-\nnational Conference on Machine Learning(PMLR, 2022)\npp. 9902–9915.\n[27] N. S. Singh, K. Kobayashi, Q. Cao, K. Selcuk, T. Hu,\nS. Niazi, N. A. Aadit, S. Kanai, H. Ohno, S. Fukami,\net al., Nat. Commun.15, 2685 (2024).\n[28] C. Pratt, K. Ray, and J. Crutchfield,Dynamical Com-\nputing on the Nanoscale: Superconducting Circuits for\nThermodynamically-Efficient Classical Information Pro-\ncessing(2023).\n[29] G. Wimsatt, O.-P. Saira, A. B. Boyd, M. H. Matheny,\nS. Han, M. L. Roukes, and J. P. Crutchfield, Phys. Rev.\nRes.3, 033115 (2021).\n[30] S. H. Adachi and M. P. Henderson,Application of Quan-\ntum Annealing to Training of Deep Neural Networks\n(2015).\n[31] B. Sutton, K. Y. Camsari, B. Behin-Aein, and S. Datta,\nSci. Rep.7, 44370 (2017).\n[32] R. Faria, K. Y. Camsari, and S. Datta, IEEE Magn. Lett.\n8, 1 (2017).\n[33] S.Niazi, S.Chowdhury, N.A.Aadit, M.Mohseni, Y.Qin,\nand K. Y. Camsari, Nat. Electron.7, 610 (2024).\n[34] W. A. Borders, A. Z. Pervaiz, S. Fukami, K. Y. Camsari,\nH. Ohno, and S. Datta, Nature573, 390 (2019).\n[35] N. S. Singh, K. Kobayashi, Q. Cao, K. Selcuk, T. Hu,\nS. Niazi, N. A. Aadit, S. Kanai, H. Ohno, S. Fukami,\net al., Nat. Commun.15, 2685 (2024).\n[36] M. M. H. Sajeeb, N. A. Aadit, S. Chowdhury, T. Wu,\nC. Smith, D. Chinmay, A. Raut, K. Y. Camsari, C. Dela-\ncour, and T. Srimani, Phys. Rev. Appl.24, 014005\n(2025).\n[37] T. Conte, E. DeBenedictis, N. Ganesh, T. Hylton,\nJ. P. Strachan, R. S. Williams, A. Alemi, L. Altenberg,\nG. Crooks, J. Crutchfield,et al., arXiv [cs.CY] (2019).\n[38] Y. Du and I. Mordatch, inAdvances in Neural Informa-\ntion Processing Systems, Vol. 32, edited by H. Wallach,\nH. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox,\nand R. Garnett (Curran Associates, Inc., 2019).\n[39] W. Lee, H. Kim, H. Jung, Y. 
Choi, J. Jeon, and C. Kim, Sci. Rep. 15, 8018 (2025).\n[40] M. Horodynski, C. Roques-Carmes, Y. Salamin, S. Choi,", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "10" }, { - "attribute": "section", + "name": "section", "value": "References/Bibliography" }, { - "attribute": "total_entries", + "name": "total_entries", "value": "40" }, { - "attribute": "date_range", + "name": "date_range", "value": "2006-2025" } ] @@ -13153,15 +13153,15 @@ "content": "[4] Y. Li, D. Choi, J. Chung, N. Kushman, J. Schrittwieser, R. Leblond, T. Eccles, J. Keeling, F. Gimeno, A. D. Lago, T. Hubert, P. Choy, C. de Masson d'Autume, I. Babuschkin, X. Chen, P.-S. Huang, J. Welbl, S. Gowal, A. Cherepanov, J. Molloy, D. J. Mankowitz, E. S. Robson, P. Kohli, N. de Freitas, K. Kavukcuoglu, and O. Vinyals, Science 378, 1092 (2022).\n[5] D. M. Katz, M. J. Bommarito, S. Gao, and P. Arredondo, Philos. Trans. R. Soc. A 382, 20230254 (2024).\n[6] H. Nori, N. King, S. M. McKinney, D. Carignan, and E. Horvitz, arXiv [cs.CL] (2023).\n[7] S. Noy and W. Zhang, Science 381, 187 (2023).\n[8] E. Brynjolfsson, D. Li, and L. Raymond, Q. J. Econ. 10.1093/qje/qjae044 (2025).\n[10] A. Bick, A. Blandin, and D. J. Deming, The Rapid Adoption of Generative AI, Tech. Rep. (National Bureau of Economic Research, 2024).", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "10" }, { - "attribute": "focus_area", + "name": "focus_area", "value": "Recent AI Research (2022-2025)" }, { - "attribute": "publication_types", + "name": "publication_types", "value": "Science, arXiv, Economic Research" } ] @@ -13172,15 +13172,15 @@ "content": "[11] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. u. Kaiser, and I. Polosukhin, in Advances in Neural Information Processing Systems, Vol. 30, edited by I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (Curran Associates, Inc., 2017).\n[14] H. Xiao, K. Rasul, and R. Vollgraf, arXiv [cs.LG] (2017).\n[16] D. P. Kingma and M. Welling, Auto-Encoding Variational Bayes (2022).\n[17] I. J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, in Advances in Neural Information Processing Systems, Vol. 27, edited by Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Weinberger (Curran Associates, Inc., 2014).\n[15] M. Heusel, H. Ramsauer, T. Unterthiner, B. Nessler, and S. Hochreiter, in Advances in Neural Information Processing Systems, Vol. 30, edited by I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (Curran Associates, Inc., 2017).", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "10" }, { - "attribute": "topic_area", + "name": "topic_area", "value": "Neural Network Foundations" }, { - "attribute": "key_papers", + "name": "key_papers", "value": "Transformers, GANs, VAEs, Deep Learning" } ] @@ -13191,15 +13191,15 @@ "content": "[28] C. Pratt, K. Ray, and J. Crutchfield, Dynamical Computing on the Nanoscale: Superconducting Circuits for Thermodynamically-Efficient Classical Information Processing (2023).\n[29] G. Wimsatt, O.-P. Saira, A. B. Boyd, M. H. Matheny, S. Han, M. L. Roukes, and J. P. Crutchfield, Phys. Rev. Res. 3, 033115 (2021).\n[30] S. H. Adachi and M. P. Henderson, Application of Quantum Annealing to Training of Deep Neural Networks (2015).\n[31] B. Sutton, K. Y. Camsari, B. Behin-Aein, and S. Datta, Sci.
Rep. 7, 44370 (2017).\n[32] R. Faria, K. Y. Camsari, and S. Datta, IEEE Magn. Lett. 8, 1 (2017).\n[33] S. Niazi, S. Chowdhury, N. A. Aadit, M. Mohseni, Y. Qin, and K. Y. Camsari, Nat. Electron. 7, 610 (2024).\n[34] W. A. Borders, A. Z. Pervaiz, S. Fukami, K. Y. Camsari, H. Ohno, and S. Datta, Nature 573, 390 (2019).", "attributes": [ { - "attribute": "page_number", + "name": "page_number", "value": "10" }, { - "attribute": "research_area", + "name": "research_area", "value": "Quantum Computing & Neuromorphic Computing" }, { - "attribute": "applications", + "name": "applications", "value": "Neural Network Training, Information Processing" } ] @@ -13210,15 +13210,15 @@ "content": "Denoising diffusion models try to learn to time-reverse a random process that converts data into simple noise. Here, we will review some details on how these models work to support the analysis in the main text.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Introduction" } ] @@ -13229,15 +13229,15 @@ "content": "The forward process is a random process that is used to convert the data distribution into noise. This conversion into noise is achieved through a stochastic differential equation in the continuous-variable case and a Markov jump process in the discrete case.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Forward Processes" } ] @@ -13248,19 +13248,19 @@ "content": "In the continuous case, the typical choice of forward process is the Itô diffusion, dX(t) = −X(t) dt + √2 σ dW, where X(t) is a length N vector representing the state variable at time t, σ is a constant, and dW is a length N vector of independent Wiener processes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Continuous Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A1" } ] @@ -13271,19 +13271,19 @@ "content": "The transition kernel for a random process defines how the probability distribution evolves in time, Q_{t|0}(x′|x) = P(X(t) = x′ | X(0) = x)", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Continuous Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A2" } ] @@ -13294,19 +13294,19 @@ "content": "For the case of Eq. (A1) the transition kernel is, Q_{t+s|s}(x′|x) ∝ e^{−(1/2)(x′−µ)^T Σ^{−1}(x′−µ)}, µ = e^{−t} x, Σ = σ²I(1−e^{−2t}); this solution can be verified by direct substitution into the corresponding Fokker-Planck equation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Continuous Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A3-A5" } ] @@ -13317,15 +13317,15 @@ "content": "In the limit of infinite time, µ → 0 and Σ → σ²I.
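The closed-form kernel above is easy to sanity-check numerically. Below is a minimal sketch (not part of the extracted document; the variable names are illustrative) that draws exactly from the transition kernel of Eqs. (A3)-(A5) and confirms the stationary zero-mean Gaussian with standard deviation σ:

```python
import numpy as np

rng = np.random.default_rng(0)
sigma, t, n = 1.5, 4.0, 100_000
x0 = 3.0 * np.ones(n)                 # start far from the stationary mean

mu = np.exp(-t) * x0                  # kernel mean, mu = exp(-t) * x0
std = sigma * np.sqrt(1.0 - np.exp(-2.0 * t))   # kernel standard deviation
xt = mu + std * rng.standard_normal(n)          # exact draw from Q_{t|0}

# For large t this should match the stationary N(0, sigma^2) distribution.
print(f"mean = {xt.mean():+.3f} (expect ~0), std = {xt.std():.3f} (expect ~{sigma})")
```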
Therefore, the stationary distribution of this process is zero-mean Gaussian noise with a standard deviation of σ.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Continuous Variables" } ] @@ -13336,19 +13336,19 @@ "content": "The stochastic dynamics of some discrete variable X may be described by the Markov jump process, dQ_t/dt = L Q_t, where L is the generator of the dynamics, which is an M×M matrix that stores the transition rates between the various states. Q_t is a length-M vector that assigns a probability to each possible state X may take at time t.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Discrete Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A6" } ] @@ -13359,19 +13359,19 @@ "content": "The transition rate from the ith state to the jth state is given by the matrix element L[j, i], which here takes the particular form, L[j, i] = γ(−(M−1)δ_{j,i} + (1−δ_{j,i})), where δ is used to indicate the Kronecker delta function. Eq. (A7) describes a random process where the probability per unit time to jump between any two states is γ.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Discrete Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A7" } ] @@ -13382,19 +13382,19 @@ "content": "Since Eq. (A6) is linear, the dynamics of Q_t can be understood entirely via the eigenvalues and eigenvectors of L, L v_k = λ_k v_k", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "11" }, { - "attribute": "topic", + "name": "topic", "value": "Discrete Variables" }, { - "attribute": "equation", + "name": "equation", "value": "A8" } ] @@ -13405,11 +13405,11 @@ "content": "Denoising diffusion models try to learn to time-reverse a random process that converts data into simple noise. Here, we will review some details on how these models work to support the analysis in the main text.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13420,11 +13420,11 @@ "content": "The forward process is a random process that is used to convert the data distribution into noise.
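The discrete forward process can be checked the same way. A minimal sketch (illustrative, not from the source) that builds the uniform-jump generator of Eq. (A7) for M states and verifies the spectrum described around Eq. (A8): one zero eigenvalue with a uniform stationary state, and M−1 decaying modes at −γM:

```python
import numpy as np
from scipy.linalg import expm

gamma, M = 0.5, 6
# L[j, i] = gamma * (-(M - 1) * delta_ji + (1 - delta_ji)), as in Eq. (A7).
L = gamma * (np.ones((M, M)) - M * np.eye(M))

# Spectrum: one zero eigenvalue (uniform stationary state) and M - 1
# degenerate decaying modes at -gamma * M.
print(np.sort(np.linalg.eigvals(L).real))

# Any initial distribution relaxes to uniform under the matrix-exponential
# solution Q_t = e^{Lt} Q_0 quoted in the appendix.
q0 = np.zeros(M); q0[0] = 1.0
print(expm(L * 10.0 / (gamma * M)) @ q0)   # every entry approaches 1/M
```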
This conversion into noise is achieved through a stochastic differential equation in the continuous-variable case and a Markov jump process in the discrete case.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13435,11 +13435,11 @@ "content": "In the continuous case, the typical choice of forward process is the Itô diffusion, dX(t) = −X(t) dt + √2 σ dW, where X(t) is a length N vector representing the state variable at time t, σ is a constant, and dW is a length N vector of independent Wiener processes.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13450,11 +13450,11 @@ "content": "The transition kernel for a random process defines how the probability distribution evolves in time, Q_{t|0}(x′|x) = P(X(t) = x′ | X(0) = x)", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13465,11 +13465,11 @@ "content": "For the case of Eq. (A1) the transition kernel is, Q_{t+s|s}(x′|x) ∝ e^{−(1/2)(x′−µ)^T Σ^{−1}(x′−µ)}, µ = e^{−t} x, Σ = σ²I(1−e^{−2t}); this solution can be verified by direct substitution into the corresponding Fokker-Planck equation.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13480,11 +13480,11 @@ "content": "In the limit of infinite time, µ → 0 and Σ → σ²I. Therefore, the stationary distribution of this process is zero-mean Gaussian noise with a standard deviation of σ.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13495,11 +13495,11 @@ "content": "The stochastic dynamics of some discrete variable X may be described by the Markov jump process, dQ_t/dt = L Q_t, where L is the generator of the dynamics, which is an M×M matrix that stores the transition rates between the various states. Q_t is a length-M vector that assigns a probability to each possible state X may take at time t.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13510,11 +13510,11 @@ "content": "The transition rate from the ith state to the jth state is given by the matrix element L[j, i], which here takes the particular form, L[j, i] = γ(−(M−1)δ_{j,i} + (1−δ_{j,i})), where δ is used to indicate the Kronecker delta function. Eq. (A7) describes a random process where the probability per unit time to jump between any two states is γ.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13525,11 +13525,11 @@ "content": "Since Eq.
(A6) is linear, the dynamics of Q_t can be understood entirely via the eigenvalues and eigenvectors of L, L v_k = λ_k v_k", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "12" } ] @@ -13540,11 +13540,11 @@ "content": "One eigenvector-eigenvalue pair (v_0, λ_0 = 0) corresponds to the unique stationary state of L, with all entries of v_0 being equal to some constant (if normalized, then v_0[j] = 1/M for all j). The remaining eigenvectors are decaying modes associated with negative eigenvalues. These additional M−1 eigenvectors take the form, v_j[i] = −δ_{i,0} + δ_{i,j}, λ_j = −γM, where Eq. (A9) and Eq. (A10) are valid for j ∈ [1, M−1]. Therefore, all solutions to this MJP decay exponentially to the uniform distribution with rate γM.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -13555,11 +13555,11 @@ "content": "The time-evolution of Q is given by the matrix exponential, Q_t = e^{Lt} Q_0. This matrix exponential is evaluated by diagonalizing L, e^{Lt} = P e^{Dt} P^{−1}, where the columns of P are the M eigenvectors v_k and D is a diagonal matrix of the eigenvalues λ_k.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -13570,11 +13570,11 @@ "content": "Using the solution for the eigenvalues and eigenvectors found above, we can solve for the matrix elements of e^{Lt}, e^{Lt}[j, i] = δ_{i,j} (1 + (M−1)e^{−γMt})/M + (1−δ_{i,j})(1−e^{−γMt})/M. Using this solution, we can deduce an exponential form for the matrix elements of e^{Lt}, e^{Lt}[j, i] = (1/Z(t)) e^{Γ(t)δ_{i,j}}, Γ(t) = ln((1 + (M−1)e^{−γt})/(1−e^{−γt})), Z(t) = M/(1−e^{−γt})", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -13585,11 +13585,11 @@ "content": "Now consider a process in which each element of the vector of N discrete variables X undergoes the dynamics described by Eq. (A6) independently. In that case, the differential equation describing the dynamics of the joint distribution Q_t is, dQ_t/dt = ∑_{k=1}^{N} (I_1 ⊗ ··· ⊗ L_k ⊗ ··· ⊗ I_N) Q_t, where I_j indicates the identity operator and L_j the operator from Eq. (A7) acting on the subspace of the jth discrete variable.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -13600,11 +13600,11 @@ "content": "The Kronecker product of the matrix exponentials gives the time-evolution of the joint distribution, e^{Lt} = ⊗_{k=1}^{N} e^{L_k t}", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix A: Denoising Diffusion Models" }, { - "attribute": "page", + "name": "page", "value": "13" } ] @@ -13615,11 +13615,11 @@ "content": "In the case that the forward process is an Itô diffusion, L is the generator for the corresponding Fokker-Planck equation,\nL = −∑_i (∂/∂x_i) f_i(x, t) + ½ ∑_{i,j} (∂/∂x_i)(∂/∂x_j) D_ij(t)  (A29)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A29)" }, { - "attribute": "type", + "name": "type", "value": "continuous_diffusion" } ] @@ -13630,11 +13630,11 @@ "content": "Using Eq.
(A28) and integration by parts, it can be shown that the adjoint operator is,\nL† = ∑_i f_i (∂/∂x_i) + ½ ∑_{i,j} D_ij (∂/∂x_i)(∂/∂x_j)  (A30)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A30)" }, { - "attribute": "type", + "name": "type", "value": "adjoint_operator" } ] @@ -13645,11 +13645,11 @@ "content": "By directly substituting Eq. (A30) into Eq. (A27) and simplifying, L_rev can be reduced to,\nL_rev = ∑_i (∂/∂x_i) g_i + ½ ∑_{i,j} (∂/∂x_i)(∂/∂x_j) D_ij  (A31)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A31)" }, { - "attribute": "type", + "name": "type", "value": "reverse_operator" } ] @@ -13660,11 +13660,11 @@ "content": "with the drift vector g,\ng_i(x, t) = f_i(x, t) − (1/Q_t(x)) ∑_j (∂/∂x_j)[D_ij(x, t) Q_t(x)]  (A32)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A32)" }, { - "attribute": "type", + "name": "type", "value": "drift_vector" } ] @@ -13675,11 +13675,11 @@ "content": "If ∆t is chosen to be sufficiently small, Eq. (A32) can be linearized and the transition kernel is Gaussian,\nQ_{t|t+∆t}(x′|x) ∝ exp(−½ (x−µ)^T Σ^{−1} (x−µ))  (A33)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A33)" }, { - "attribute": "type", + "name": "type", "value": "gaussian_kernel" } ] @@ -13690,11 +13690,11 @@ "content": "µ = x + ∆t g(x, t)  (A34)\nΣ = ∆t D(t)  (A35)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A34)-(A35)" }, { - "attribute": "type", + "name": "type", "value": "gaussian_parameters" } ] @@ -13705,11 +13705,11 @@ "content": "Therefore, one can build a continuous diffusion model with arbitrary approximation power by working in the small ∆t limit and approximating the reverse process using a Gaussian distribution with a neural network defining the mean vector [1, 2].", "attributes": [ { - "attribute": "method", + "name": "method", "value": "neural_network_approximation" }, { - "attribute": "scope", + "name": "scope", "value": "continuous_models" } ] @@ -13720,11 +13720,11 @@ "content": "In a discrete diffusion model, L is given by Eq. (A17). This tensor product form for L guarantees that L(x′, x) = 0 for any vectors x′ and x that have a Hamming distance greater than one (i.e., L is nonzero only for pairs of states with at least N−1 matching elements).", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A17)" }, { - "attribute": "type", + "name": "type", "value": "discrete_operator" } ] @@ -13735,15 +13735,15 @@ "content": "As such, in discrete diffusion models, neural networks trained to approximate ratios of the data distribution Q_{T−s}(x′)/Q_{T−s}(x) for neighboring x′ and x can be used to implement an arbitrarily good approximation to the actual reverse process [3].", "attributes": [ { - "attribute": "method", + "name": "method", "value": "neural_network_approximation" }, { - "attribute": "scope", + "name": "scope", "value": "discrete_models" }, { - "attribute": "reference", + "name": "reference", "value": "[3]" } ] @@ -13754,15 +13754,15 @@ "content": "Using Eq.
(A28) and integration by parts, it can be shown that the adjoint operator is,\nL† = ∑_i f_i (∂/∂x_i) + ½ ∑_{i,j} D_ij (∂/∂x_i)(∂/∂x_j)  (A30)", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Continuous variables" }, { - "attribute": "equation", + "name": "equation", "value": "(A30)" }, { - "attribute": "page", + "name": "page", "value": "15" } ] @@ -13773,15 +13773,15 @@ "content": "Therefore, one can build a continuous diffusion model with arbitrary approximation power by working in the small Δt limit and approximating the reverse process using a Gaussian distribution with a neural network defining the mean vector [1, 2].", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Continuous variables" }, { - "attribute": "reference", + "name": "reference", "value": "[1, 2]" }, { - "attribute": "page", + "name": "page", "value": "15" } ] @@ -13792,15 +13792,15 @@ "content": "L_DN(θ) = D(Q_{0,...,T}(·) || P^θ_{0,...,T}(·))  (A36)\nthe Markovian nature of Q can be taken advantage of to simplify Eq. (A36) into a layerwise form,\nL_DN(θ) + C = −∑_{t=1}^{T} E_{Q(x_{t−1},x_t)} [log P_θ(x_{t−1}|x_t)]  (A37)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A36), (A37)" }, { - "attribute": "section", + "name": "section", "value": "The Diffusion Loss" }, { - "attribute": "page", + "name": "page", "value": "16" } ] @@ -13811,15 +13811,15 @@ "content": "As such, in discrete diffusion models, neural networks trained to approximate ratios of the data distribution Q_{T−s}(x′)/Q_{T−s}(x) for neighboring x′ and x can be used to implement an arbitrarily good approximation to the actual reverse process [3].", "attributes": [ { - "attribute": "reference", + "name": "reference", "value": "[3]" }, { - "attribute": "section", + "name": "section", "value": "Discrete variables" }, { - "attribute": "page", + "name": "page", "value": "15" } ] @@ -13830,15 +13830,15 @@ "content": "In principle, this is very simple: we concatenate the target (in our case, the images) and a one-hot encoding of the labels into a contiguous binary vector and treat that whole thing as our training data on which we train the denoising model as described above.", "attributes": [ { - "attribute": "application", + "name": "application", "value": "MNIST digit generation" }, { - "attribute": "section", + "name": "section", "value": "Conditional Generation" }, { - "attribute": "page", + "name": "page", "value": "17" } ] @@ -13849,15 +13849,15 @@ "content": "However, during conditional inference, the models will have their label nodes clamped to an unnoised label l_0, and they may not know how this should influence the generated image (and this problem would only be exacerbated if we clamped to a noised label instead).\nThis issue can be mitigated by using a rate γ_X when noising image entries in the training data and a different rate γ_L for noising label entries.", "attributes": [ { - "attribute": "recommendation", + "name": "recommendation", "value": "Use different noise rates for images and labels" }, { - "attribute": "section", + "name": "section", "value": "Conditional Generation" }, { - "attribute": "page", + "name": "page", "value": "17" } ] @@ -13868,15 +13868,15 @@ "content": "The energy landscape is bimodal at λ = 0 and gradually becomes distorted towards an unimodal distribution centered at x_t as λ increases. This reshaping is intuitive, as shortening the forward process timestep should more strongly constrain x_{t−1} to x_t.", "attributes": [ { - "attribute": "visualization", + "name": "visualization", "value": "FIG.
7" }, { - "attribute": "section", + "name": "section", "value": "Simplification of the Energy Landscape" }, { - "attribute": "page", + "name": "page", "value": "16" } ] @@ -13887,15 +13887,15 @@ "content": "∇θLDN (θ) =−\nTX\nt=1\nEQ(xt−1,xt)\n[∇θ log\nPθ(xt−1|xt)\n]\n(A38)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "(A38)" }, { - "attribute": "section", + "name": "section", "value": "Monte-Carlo gradient estimator" }, { - "attribute": "page", + "name": "page", "value": "16" } ] @@ -13906,11 +13906,11 @@ "content": "LDN (θ) =D\nQ0,...,T (·)||Pθ\n0,...,T (·)\n(A36)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A36" }, { - "attribute": "section", + "name": "section", "value": "3" } ] @@ -13921,11 +13921,11 @@ "content": "LDN (θ) +C=−\nTX\nt=1\nEQ(xt−1,xt) [log (Pθ(xt−1|xt)](A37)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A37" }, { - "attribute": "section", + "name": "section", "value": "3" } ] @@ -13936,7 +13936,7 @@ "content": "For denoising algorithms that operate in the infinitesimal limit, the simple form of Pθ allows forLDN and its gradients to be computed exactly.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3" } ] @@ -13947,11 +13947,11 @@ "content": "In the case wherePθ\nxt−1|xt\n\nis an EBM, there exists no simple closed-form expression for∇θLDN (θ). In that case, one must employ a Monte Carlo estimator to approximate the gradient.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "3a" }, { - "attribute": "model_type", + "name": "model_type", "value": "EBM" } ] @@ -13962,11 +13962,11 @@ "content": "∇θLDN (θ) =−\nTX\nt=1\nEQ(xt−1,xt)\n ∇θ log\n Pθ(xt−1|xt)\n \n (A38)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A38" }, { - "attribute": "section", + "name": "section", "value": "3a" } ] @@ -13977,15 +13977,15 @@ "content": "∇θ log\n Pθ(xt−1|xt)\n \n =E Pθ(xt−1,zt−1|xt)\n ∇θEm\n t−1\n \n −E Pθ(zt−1|xt−1,xt)\n ∇θEm\n t−1\n \n (A39)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A39" }, { - "attribute": "section", + "name": "section", "value": "3a" }, { - "attribute": "model_type", + "name": "model_type", "value": "EBM" } ] @@ -13996,11 +13996,11 @@ "content": "As the forward process timestep is made smaller, the energy landscape of the EBM-based approximation to the reverse process becomes simpler.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4" }, { - "attribute": "model_type", + "name": "model_type", "value": "EBM" } ] @@ -14011,11 +14011,11 @@ "content": "Eθ\nt−1 (xt−1) =\n x2\n t−1 −1\n \n 2\n(A40)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A40" }, { - "attribute": "section", + "name": "section", "value": "4" } ] @@ -14026,15 +14026,15 @@ "content": "Ef\nt−1 (xt−1, xt) =λ\n xt−1\n xt\n −1\n \n 2\n(A41)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "A41" }, { - "attribute": "section", + "name": "section", "value": "4" }, { - "attribute": "process_type", + "name": "process_type", "value": "Gaussian diffusion" } ] @@ -14045,11 +14045,11 @@ "content": "The parameterλscales inversely with the size of the forward process timestep; that is,lim\n∆t→0\nλ=∞.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4" }, { - "attribute": "parameter", + "name": "parameter", "value": "λ" } ] @@ -14060,7 +14060,7 @@ "content": "The reverse process 
conditional energy landscape is then E^θ_{t−1} + E^f_{t−1}.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4" } ] @@ -14071,11 +14071,11 @@ "content": "This reshaping is intuitive, as shortening the forward process timestep should more strongly constrain x_{t−1} to x_t.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4" }, { - "attribute": "sentiment", + "name": "sentiment", "value": "intuitive" } ] @@ -14086,15 +14086,15 @@ "content": "The effect of λ on this is shown in Fig. 7.\nThe energy landscape is bimodal at λ = 0 and gradually becomes distorted towards an unimodal distribution centered at x_t as λ increases.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "4" }, { - "attribute": "reference", + "name": "reference", "value": "Figure 7" }, { - "attribute": "observation", + "name": "observation", "value": "bimodal to unimodal transition" } ] @@ -14105,15 +14105,15 @@ "content": "Experimentally, we observed that settings in the ranges γ_L ∈ [0.1, 0.3] and γ_X ∈ [0.7, 1.5] (for models with four to 12 steps) yielded good conditional generation performance while avoiding the freezing problem.", "attributes": [ { - "attribute": "methodology", + "name": "methodology", "value": "experimental" }, { - "attribute": "parameter_range", + "name": "parameter_range", "value": "γL: [0.1,0.3], γX: [0.7,1.5]" }, { - "attribute": "model_complexity", + "name": "model_complexity", "value": "4-12 steps" } ] @@ -14124,15 +14124,15 @@ "content": "If a DTM is trained to match the conditional distribution of the reverse process perfectly, the learned energy function E^θ_{t−1} is the energy function of the true marginal distribution, that is, E^θ_{t−1}(x) ∝ log Q(x_{t−1}).", "attributes": [ { - "attribute": "theoretical_basis", + "name": "theoretical_basis", "value": "Bayes' rule" }, { - "attribute": "model_type", + "name": "model_type", "value": "DTM (Diffusion Transition Model)" }, { - "attribute": "mathematical_relationship", + "name": "mathematical_relationship", "value": "E^θ_{t−1}(x) ∝ log Q(x_{t−1})" } ] @@ -14143,15 +14143,15 @@ "content": "In this work, we focus on a hardware architecture for EBMs that are naturally expressed as Probabilistic Graphical Models (PGMs). In a PGM-EBM, the random variables involved in the model map to the nodes of a graph, which are connected by edges that indicate dependence between variables.", "attributes": [ { - "attribute": "architecture_type", + "name": "architecture_type", "value": "PGM-based EBM hardware" }, { - "attribute": "model_representation", + "name": "model_representation", "value": "Probabilistic Graphical Models" }, { - "attribute": "computational_approach", + "name": "computational_approach", "value": "modular sampling" } ] @@ -14162,15 +14162,15 @@ "content": "Since the sampling circuits only communicate locally, this type of computer will spend significantly less energy on communication than one built on a Von Neumann-like architecture, which constantly shuttles data between compute and memory.", "attributes": [ { - "attribute": "efficiency_advantage", + "name": "efficiency_advantage", "value": "reduced communication energy" }, { - "attribute": "architecture_comparison", + "name": "architecture_comparison", "value": "local vs Von Neumann" }, { - "attribute": "computational_model", + "name": "computational_model", "value": "compute-in-memory" } ] @@ -14181,15 +14181,15 @@ "content": "Formally, the algorithm that defines this modular sampling procedure for PGMs is called Gibbs sampling.
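To make the procedure concrete, here is a minimal sketch of sequential Gibbs sampling for a small ±1 Boltzmann machine. The sigmoid conditional is the standard form implied by the quadratic-EBM discussion later in this appendix; Eq. 11 of the main text is not reproduced here, so treat the exact parameterization as an assumption:

```python
import numpy as np

def gibbs_sweep(x, J, h, rng, beta=1.0):
    """One full sweep of single-site Gibbs updates, x_i ~ p(x_i | nb(x_i))."""
    for i in range(len(x)):
        # Linear function of neighbor states and weights, the "natural"
        # hardware update for a quadratic energy
        # E(x) = -sum_{i<j} J_ij x_i x_j - sum_i h_i x_i (one common convention).
        local_field = J[i] @ x + h[i]
        p_up = 1.0 / (1.0 + np.exp(-2.0 * beta * local_field))
        x[i] = 1 if rng.random() < p_up else -1
    return x

rng = np.random.default_rng(0)
N = 16
J = rng.normal(0, 0.3, (N, N)); J = (J + J.T) / 2; np.fill_diagonal(J, 0)
h = rng.normal(0, 0.1, N)
x = rng.choice([-1, 1], N)
for _ in range(1000):
    x = gibbs_sweep(x, J, h, rng)
print(x)
```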
In Gibbs sampling, samples are drawn from the joint distribution p(x_1, x_2, ..., x_N) by iteratively updating the state of each node conditioned on the current state of its neighbors. For the ith node, this means sampling from the distribution, x_i[t+1] ∼ p(x_i | nb(x_i)[t]).", "attributes": [ { - "attribute": "algorithm", + "name": "algorithm", "value": "Gibbs sampling" }, { - "attribute": "sampling_method", + "name": "sampling_method", "value": "iterative node conditioning" }, { - "attribute": "mathematical_formulation", + "name": "mathematical_formulation", "value": "x_i[t+1] ∼ p(x_i | nb(x_i)[t])" } ] @@ -14200,27 +14200,27 @@ "content": "Experimentally, we observed that settings in the ranges γ_L ∈ [0.1, 0.3] and γ_X ∈ [0.7, 1.5] (for models with four to 12 steps) yielded good conditional generation performance while avoiding the freezing problem.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Experimental observation" }, { - "attribute": "model_range", + "name": "model_range", "value": "4-12 steps" }, { - "attribute": "gamma_L", + "name": "gamma_L", "value": "[0.1,0.3]" }, { - "attribute": "gamma_X", + "name": "gamma_X", "value": "[0.7,1.5]" }, { - "attribute": "performance", + "name": "performance", "value": "good conditional generation" }, { - "attribute": "issue_avoided", + "name": "issue_avoided", "value": "freezing problem" } ] @@ -14231,19 +14231,19 @@ "content": "If a DTM is trained to match the conditional distribution of the reverse process perfectly, the learned energy function E^θ_{t−1} is the energy function of the true marginal distribution, that is, E^θ_{t−1}(x) ∝ log Q(x_{t−1}).", "attributes": [ { - "attribute": "theoretical_basis", + "name": "theoretical_basis", "value": "Bayes' rule application" }, { - "attribute": "model_type", + "name": "model_type", "value": "DTM (Diffusion Transition Model)" }, { - "attribute": "condition", + "name": "condition", "value": "perfect match with true reverse process" }, { - "attribute": "mathematical_relationship", + "name": "mathematical_relationship", "value": "E^θ_{t−1}(x) ∝ log Q(x_{t−1})" } ] @@ -14254,15 +14254,15 @@ "content": "PGMs form a natural basis for a hardware architecture because they can be sampled using a modular procedure that respects the graph's structure. Specifically, the state of a PGM can be updated by iteratively stepping through each node of the graph and resampling one variable at a time, using only information about the current node and its immediate neighbors.", "attributes": [ { - "attribute": "model_type", + "name": "model_type", "value": "PGM-EBM (Probabilistic Graphical Model - Energy-Based Model)" }, { - "attribute": "sampling_method", + "name": "sampling_method", "value": "Modular procedure respecting graph structure" }, { - "attribute": "advantage", + "name": "advantage", "value": "Local communication, efficient hardware implementation" } ] @@ -14273,15 +14273,15 @@ "content": "This local PGM sampler represents a type of compute-in-memory approach, where the state of the sampling program is spatially distributed throughout the array of sampling circuitry.
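The parallelism this architecture exploits is easiest to see in code. A hedged sketch of chromatic ("checkerboard") Gibbs sampling on a nearest-neighbor grid, where every node of one color class is updated simultaneously, as the chromatic-Gibbs passage nearby describes (the coupling value and grid size are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
side, J, beta = 32, 0.4, 1.0
x = rng.choice([-1, 1], (side, side))
ii, jj = np.indices((side, side))
colors = (ii + jj) % 2                  # 2-coloring of the grid graph

def color_update(x, color):
    # Sum of the four nearest neighbors (periodic boundaries).
    nb = (np.roll(x, 1, 0) + np.roll(x, -1, 0) +
          np.roll(x, 1, 1) + np.roll(x, -1, 1))
    p_up = 1.0 / (1.0 + np.exp(-2.0 * beta * J * nb))
    flips = np.where(rng.random(x.shape) < p_up, 1, -1)
    # Same-color nodes share no edges, so they update in one parallel step.
    return np.where(colors == color, flips, x)

for _ in range(200):
    x = color_update(x, 0)
    x = color_update(x, 1)
print("magnetization:", x.mean())
```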
Since the sampling circuits only communicate locally, this type of computer will spend significantly less energy on communication than one built on a Von Neumann-like architecture, which constantly shuttles data between compute and memory.", "attributes": [ { - "attribute": "architecture_type", + "name": "architecture_type", "value": "Compute-in-memory" }, { - "attribute": "advantage", + "name": "advantage", "value": "Significantly less energy on communication" }, { - "attribute": "comparison", + "name": "comparison", "value": "vs Von Neumann architecture" } ] @@ -14292,15 +14292,15 @@ "content": "Formally, the algorithm that defines this modular sampling procedure for PGMs is called Gibbs sampling. In Gibbs sampling, samples are drawn from the joint distributionp(x1, x2, . . . , xN )by iteratively updating the state of each node conditioned on the current state of its neighbors. For theith node, this means sampling from the distribution, xi[t+ 1]∼p(x i|nb(xi)[t]).", "attributes": [ { - "attribute": "algorithm_name", + "name": "algorithm_name", "value": "Gibbs sampling" }, { - "attribute": "sampling_procedure", + "name": "sampling_procedure", "value": "Iterative node updates conditioned on neighbors" }, { - "attribute": "mathematical_form", + "name": "mathematical_form", "value": "xi[t+ 1]∼p(x i|nb(xi)[t])" } ] @@ -14311,11 +14311,11 @@ "content": "Since each node's update distribution only depends on the state of its neighbors and because nodes of the same color do not neighbor each other, they can all be updated in parallel.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Chromatic Gibbs Sampling" }, { - "attribute": "page", + "name": "page", "value": "19" } ] @@ -14326,11 +14326,11 @@ "content": "The primary constraint around building a hardware device that implements Gibbs sampling is that the conditional update given in Eq. (B1) must be efficiently implementable. Generally, this means that one wants it to take a form that is 'natural' to the hardware substrate being used to build the computer.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Quadratic EBMs" }, { - "attribute": "page", + "name": "page", "value": "19" } ] @@ -14341,11 +14341,11 @@ "content": "Quadratic EBMs have energy functions that are quadratic in the model's variables, which generally leads to conditional updates computed by biasing a simple sampling circuit (Bernoulli, categorical, Gaussian, etc.) with the output of a linear function of the neighbor states and the model parameters. These simple interactions are efficient to implement in various types of hardware.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Quadratic EBMs" }, { - "attribute": "page", + "name": "page", "value": "19" } ] @@ -14356,11 +14356,11 @@ "content": "Therefore, to build a hardware device that samples from Potts models using Gibbs sampling, one would have to build a softmax sampling circuit parameterized by a linear function of the model weights and neighbor states. 
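A minimal sketch of that softmax update, following the sign convention of Eqs. (B9)-(B10) as quoted a little further on; the weight shapes and values below are illustrative, not the paper's:

```python
import numpy as np

rng = np.random.default_rng(0)
M, n_nb, beta = 4, 3, 1.0
J = rng.normal(0, 0.2, (n_nb, M, M))   # J[j, m, n]: coupling to neighbor j
h = rng.normal(0, 0.1, M)
nb_states = rng.integers(0, M, n_nb)   # current neighbor states (as indices)

# theta_i^m = beta * (2 * sum_{j,n} J_ij^{mn} x_j^n + h_i^m), as in (B10);
# with one-hot neighbors the sum over n picks out one column per neighbor.
theta = beta * (2.0 * sum(J[j, :, nb_states[j]] for j in range(n_nb)) + h)
logits = -theta                        # p(x_i^m = 1) ~ exp(-theta_i^m), (B9)
p = np.exp(logits - logits.max()); p /= p.sum()
new_state = rng.choice(M, p=p)
print("update distribution:", p, "-> sampled state:", new_state)
```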
Potts model sampling is slightly more complicated than Boltzmann machine sampling, but it is likely possible.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Potts models" }, { - "attribute": "page", + "name": "page", "value": "20" } ] @@ -14371,11 +14371,11 @@ "content": "Hardware implementations of Gaussian-Bernoulli EBMs are more difficult than the strictly discrete models because the signals being passed during conditional sampling of the binary variables are continuous. To pass these continuous values, they must either be embedded into several discrete variables or an analog signaling system must be used. Both of these solutions would incur significant overhead compared to the purely discrete models.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Gaussian-Bernoulli EBMs" }, { - "attribute": "page", + "name": "page", "value": "20" } ] @@ -14386,11 +14386,11 @@ "content": "The denoising models used in this work exclusively modeled distributions of binary variables. The reverse process energy function (Eq. 7 in the main text) was implemented using a Boltzmann machine. The forward process energy functionEf t−1 was implemented using a simple set of pairwise couplings betweenxt (blue nodes) andxt−1 (green nodes). The marginal energy functionEθ t−1 was implemented using a latent variable model (latent nodes are drawn in orange) with a sparse, local coupling structure.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "A hardware architecture for denoising" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -14401,11 +14401,11 @@ "content": "Within the grid, we randomly choose some subset of the nodes to represent the data variablesxt−1. The remaining nodes then implement the latent variablezt−1. The grid is, therefore, a deep Boltzmann machine with a sparse connectivity structure and multiple hidden layers.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Implementation of the marginal energy function" }, { - "attribute": "page", + "name": "page", "value": "21" } ] @@ -14416,11 +14416,11 @@ "content": "Potts model sampling is slightly more complicated than Boltzmann machine sampling, but it is likely possible.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Potts models" }, { - "attribute": "page", + "name": "page", "value": "20" } ] @@ -14431,11 +14431,11 @@ "content": "Here, we will touch on a few other types of quadratic EBM that are more general. 
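Ahead of that discussion, a hedged sketch of the mixed conditional updates that make Gaussian-Bernoulli EBMs harder to put in hardware: the binary-to-continuous message is a real number rather than a bit. The parameterization below is one common convention, not something quoted from the source:

```python
import numpy as np

rng = np.random.default_rng(0)
n_bin, var = 5, 0.5
w = rng.normal(0, 0.3, n_bin)      # binary-to-continuous couplings
b = 0.1                            # bias on the continuous unit
s = rng.choice([-1, 1], n_bin)     # binary neighbor states

# Continuous unit given binary neighbors: Gaussian with a mean that is a
# linear function of the neighbors (this is the continuous signal that must
# be embedded in discrete variables or sent over analog wiring in hardware).
x = rng.normal(var * (w @ s + b), np.sqrt(var))

# Binary unit given the continuous neighbor: a sigmoid-biased coin flip.
p_up = 1.0 / (1.0 + np.exp(-2.0 * w[0] * x))
print(f"x = {x:.3f}, p(s_0 = +1 | x) = {p_up:.3f}")
```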
Although the experiments in this paper focused on Boltzmann machines, they could be trivially extended to these more expressive classes of distributions.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Introduction to quadratic EBMs" }, { - "attribute": "page", + "name": "page", "value": "20" } ] @@ -14446,11 +14446,11 @@ "content": "xi^m is a one-hot encoding of the state of variable xi, xi^m ∈ {0,1} (B5) ∑^M_m=1 xi^m = 1 (B6) which implies that xi^m = 1 for a single value of m, and is zero otherwise.", "attributes": [ { - "attribute": "model_type", + "name": "model_type", "value": "Potts" }, { - "attribute": "encoding", + "name": "encoding", "value": "one-hot" } ] @@ -14461,11 +14461,11 @@ "content": "p(xi^m = 1|mb(xi)) ∝ 1/Z e^{-θ_i^m} (B9) θ_i^m = β(2∑_{j∈mb(xi),n} J_ij^mn x_j^n + h_i^m) (B10)", "attributes": [ { - "attribute": "distribution_type", + "name": "distribution_type", "value": "softmax" }, { - "attribute": "condition", + "name": "condition", "value": "symmetric weights" } ] @@ -14476,15 +14476,15 @@ "content": "Therefore, to build a hardware device that samples from Potts models using Gibbs sampling, one would have to build a softmax sampling circuit parameterized by a linear function of the model weights and neighbor states. Potts model sampling is slightly more complicated than Boltzmann machine sampling, but it is likely possible.", "attributes": [ { - "attribute": "implementation", + "name": "implementation", "value": "hardware" }, { - "attribute": "complexity", + "name": "complexity", "value": "moderate" }, { - "attribute": "feasibility", + "name": "feasibility", "value": "likely possible" } ] @@ -14495,15 +14495,15 @@ "content": "Gaussian-Bernoulli EBMs extend Boltzmann machines to continuous, binary mixtures. In general, this type of model can have continuous-continuous, binary-binary, and binary-continuous interactions.", "attributes": [ { - "attribute": "model_type", + "name": "model_type", "value": "Gaussian-Bernoulli" }, { - "attribute": "extensions", + "name": "extensions", "value": "continuous, binary mixtures" }, { - "attribute": "interaction_types", + "name": "interaction_types", "value": "three types" } ] @@ -14514,15 +14514,15 @@ "content": "Although the experiments in this paper focused on Boltzmann machines, they could be trivially extended to these more expressive classes of distributions.", "attributes": [ { - "attribute": "scope", + "name": "scope", "value": "research experiments" }, { - "attribute": "extendibility", + "name": "extendibility", "value": "trivial" }, { - "attribute": "focus", + "name": "focus", "value": "Boltzmann machines" } ] @@ -14533,11 +14533,11 @@ "content": "A graph for hardware denoising. The grid is subdivided at random into visible (green) nodes, representing the variablesx t−1, and latent (orange) nodes, representingzt−1. Each visible nodext−1 j is coupled to a (blue) node carrying the value from the previous step of denoisingxt j (note that these blue nodes stay fixed throughout the Gibbs sampling).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Fig. 9b description" }, { - "attribute": "section", + "name": "section", "value": "Hardware Architecture" } ] @@ -14548,11 +14548,11 @@ "content": "As explicitly stated in Eq. 7 of the article, our variational approximation to the reverse process conditional has an energy function that is the sum of the forward process energy function and the marginal energy function. 
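A small sketch of how such a summed energy can be assembled for binary variables. The exact coupling of Eq. (C1) is not visible here, so the per-site pinning term and its strength are assumptions made for illustration:

```python
import numpy as np

def marginal_energy(x, z, W_xz, h_x, h_z):
    """E_theta_{t-1}: Boltzmann machine over visible x and latent z."""
    return -(x @ W_xz @ z + h_x @ x + h_z @ z)

def forward_energy(x_prev, x_t, coupling):
    """E_f_{t-1}: assumed per-site pairwise pinning of x_{t-1} to x_t."""
    return -coupling * np.sum(x_prev * x_t)

rng = np.random.default_rng(0)
n_vis, n_lat = 8, 8
W_xz = rng.normal(0, 0.2, (n_vis, n_lat))
h_x, h_z = rng.normal(0, 0.1, n_vis), rng.normal(0, 0.1, n_lat)
x_t = rng.choice([-1, 1], n_vis)         # clamped during Gibbs sampling
x_prev, z = rng.choice([-1, 1], n_vis), rng.choice([-1, 1], n_lat)

gamma_t = 1.2   # hypothetical value of the schedule-dependent coupling
E_total = (marginal_energy(x_prev, z, W_xz, h_x, h_z)
           + forward_energy(x_prev, x_t, gamma_t))
print("conditional energy:", E_total)
```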
Physically, this corresponds to adding nodes to our grid that implementxt, which are connected pairwise to the data nodes implementingx t−1 via the coupling defined in Eq. (C1).", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Section after Table I" }, { - "attribute": "equation", + "name": "equation", "value": "Eq. 7" } ] @@ -14563,15 +14563,15 @@ "content": "We provide experimental measurements of our novel RNG circuitry in the main text, which establish that random bits can be produced at a rate ofτ−1 rng ≈10MHz using∼350aJ of energy per bit.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Appendix D: Energetic analysis" }, { - "attribute": "measurement", + "name": "measurement", "value": "Experimental" }, { - "attribute": "performance_metric", + "name": "performance_metric", "value": "10MHz, 350aJ/bit" } ] @@ -14582,11 +14582,11 @@ "content": "The design considered here utilizes a linear analog circuit to combine the neighboring states and model weights, producing a control voltage for an RNG. This RNG then produces a random bit that is biased by a sigmoidal function of the control voltage. This updated state is then broadcast back to the neighbors.", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Appendix D: Sampling cell design" }, { - "attribute": "component", + "name": "component", "value": "Linear analog circuit + RNG" } ] @@ -14597,11 +14597,11 @@ "content": "Pattern Connectivity\nG8 (0,1),(4,1)\nG12 (0,1),(4,1),(9,10)\nG16 (0,1),(4,1),(8,7),(14,9)\nG20 (0,1),(4,1),(3,6),(8,7),(14,9)\nG24 (0,1),(1,2),(4,1),(3,6),(8,7),(14,9)", "attributes": [ { - "attribute": "source", + "name": "source", "value": "Table I" }, { - "attribute": "type", + "name": "type", "value": "Connectivity patterns" } ] @@ -14612,11 +14612,11 @@ "content": "Fig. 15 (a) shows an output voltage waveform from the RNG circuit. It wanders randomly between high and low states. Critically, the bias of the RNG circuit (the probability of finding it in the high or low state) is a sigmoidal function of its control voltage, which allows", "attributes": [ { - "attribute": "source", + "name": "source", "value": "End of page 21" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 15(a)" } ] @@ -14627,11 +14627,11 @@ "content": "Our hardware denoising architecture (a)An example of a possible connectivity pattern as specified in Table. I. For clarity, the pattern is illustrated as applied to a single cell; however, in reality, the pattern is repeated for every cell in the grid.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Hardware Architecture Overview" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 9(a)" } ] @@ -14642,11 +14642,11 @@ "content": "(b)A graph for hardware denoising. The grid is subdivided at random into visible (green) nodes, representing the variables x_{t-1}, and latent (orange) nodes, representing z_{t-1}. Each visible node x_{t-1}_j is coupled to a (blue) node carrying the value from the previous step of denoising x^j_t (note that these blue nodes stay fixed throughout the Gibbs sampling).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Hardware Architecture Overview" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 9(b)" } ] @@ -14657,11 +14657,11 @@ "content": "As explicitly stated in Eq. 
7 of the article, our variational approximation to the reverse process conditional has an energy function that is the sum of the forward process energy function and the marginal energy function.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Variational Approximation" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. 7" } ] @@ -14672,11 +14672,11 @@ "content": "Our RNG design uses only transistors and can integrate tightly with other traditional circuit components on a chip to implement a large-scale sampling system. Since there are no exotic components involved that introduce unknown integration barriers, it is straightforward to build a simple physical model to predict how this device utilizes energy.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "RNG Design" }, { - "attribute": "confidence", + "name": "confidence", "value": "high" } ] @@ -14687,11 +14687,11 @@ "content": "The performance of the device can be understood by analyzing the unit sampling cell that lives on each node of the PGM implemented by the hardware. The function of this cell is to implement the Boltzmann machine conditional update, as given in Eq. 11 in the main text.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Unit Sampling Cell" }, { - "attribute": "reference", + "name": "reference", "value": "Eq. 11" } ] @@ -14702,11 +14702,11 @@ "content": "There are many possible designs for the sampling cell. The design considered here utilizes a linear analog circuit to combine the neighboring states and model weights, producing a control voltage for an RNG. This RNG then produces a random bit that is biased by a sigmoidal function of the control voltage.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Sampling Cell Design" }, { - "attribute": "design_type", + "name": "design_type", "value": "linear analog circuit" } ] @@ -14717,15 +14717,15 @@ "content": "We provide experimental measurements of our novel RNG circuitry in the main text, which establish that random bits can be produced at a rate of τ^{-1}_{rng} ≈10MHz using ~350aJ of energy per bit.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Experimental Results" }, { - "attribute": "measurement_type", + "name": "measurement_type", "value": "energy consumption" }, { - "attribute": "confidence", + "name": "confidence", "value": "experimental" } ] @@ -14736,15 +14736,15 @@ "content": "Fig. 15 (a) shows an output voltage waveform from the RNG circuit. It wanders randomly between high and low states. Critically, the bias of the RNG circuit (the probability of finding it in the high or low state) is a sigmoidal function of its control voltage, which allows", "attributes": [ { - "attribute": "section", + "name": "section", "value": "RNG Circuit Behavior" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 15(a)" }, { - "attribute": "circuit_behavior", + "name": "circuit_behavior", "value": "sigmoidal bias function" } ] @@ -14755,11 +14755,11 @@ "content": "Pattern Connectivity\nG8 (0,1),(4,1)\nG12 (0,1),(4,1),(9,10)\nG16 (0,1),(4,1),(8,7),(14,9)\nG20 (0,1),(4,1),(3,6),(8,7),(14,9)\nG24 (0,1),(1,2),(4,1),(3,6),(8,7),(14,9)\nTABLE I. 
Edges (ordered pairs) associated with graphs of various degrees.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Connectivity Patterns" }, { - "attribute": "table_reference", + "name": "table_reference", "value": "Table I" } ] @@ -14770,11 +14770,11 @@ "content": "The cell must also support initialization and readout (get/set state operations). A schematic of a unit cell is shown in Fig. 8.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Sampling Cell Operations" }, { - "attribute": "reference", + "name": "reference", "value": "Fig. 8" } ] @@ -14785,15 +14785,15 @@ "content": "V^∞_b = ∑_{j=1}^{n+2} (G_j/G_Σ) V_dd y_j  (D4)\nwhere the total conductance G_Σ is,\nG_Σ = ∑_{j=1}^{n+2} G_j  (D5)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D4-D5" }, { - "attribute": "type", + "name": "type", "value": "circuit_parameter" }, { - "attribute": "voltage", + "name": "voltage", "value": "V∞b" } ] @@ -14804,15 +14804,15 @@ "content": "P(x_i = 1) = σ(V_b/V_s − ϕ)  (D6)\ninserting Eq. (D4) and expanding the term inside the sigmoid,\nV_b/V_s − ϕ = ∑_{j=1}^{n} (G_j/G_Σ)(V_dd/V_s)(x_j ⊕ s_j) + [(G_{n+1}/G_Σ)(V_dd/V_s) − ϕ]  (D7)\nby comparison to the Boltzmann machine conditional, we can see that the first term implements the model weights (which can be positive or negative given an appropriate setting of the sign bit s_j), and the second term implements a bias.", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D6-D7" }, { - "attribute": "type", + "name": "type", "value": "bias_curve" }, { - "attribute": "function", + "name": "function", "value": "sigmoid" } ] @@ -14823,15 +14823,15 @@ "content": "The static power drawn by this circuit can be written in the form,\nP_∞ = (C/τ_bias) V²_dd (1−γ)γ  (D8)\nwhere 0 ≤ γ ≤ 1 is the input-dependent constant,\nγ = ∑_{j=1}^{n+2} (G_j/G_Σ) y_j  (D9)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D8-D9" }, { - "attribute": "type", + "name": "type", "value": "power_consumption" }, { - "attribute": "parameter", + "name": "parameter", "value": "γ" } ] @@ -14842,15 +14842,15 @@ "content": "This fixed point must be held while the noise generator relaxes, which means that the energetic cost of the biasing circuit is approximately,\nE_bias ≈ P_∞ τ_rng = C (τ_rng/τ_bias) V²_dd (1−γ)γ  (D10)\nThis is maximized for γ = 1/2.\nTo avoid slowing down the sampling machine, τ_rng/τ_bias ≫ 1. As such, ignoring the energy spent charging the capacitor ∼½ C V²_b will not significantly affect the results, and the approximation made in Eq. (D10) should be accurate. The energy consumed by the bias circuit is primarily due to static power dissipation.", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D10" }, { - "attribute": "type", + "name": "type", "value": "energy_consumption" }, { - "attribute": "maximum", + "name": "maximum", "value": "γ=1/2" } ] @@ -14861,15 +14861,15 @@ "content": "In most electronic devices, signals are communicated by charging and discharging wires.
Charging a wire requires the energy input,\nE_charge = ½ C_wire V²_sig  (D11)\nwhere C_wire is the capacitance associated with the wire, which grows with its length, and V_sig is the signaling voltage level.", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D11" }, { - "attribute": "type", + "name": "type", "value": "communication_energy" }, { - "attribute": "dependency", + "name": "dependency", "value": "wire_length" } ] @@ -14880,15 +14880,15 @@ "content": "Given the connectivity patterns shown in Table I, it is possible to estimate the total capacitance C_n associated with the wire connecting a node to all of its neighbors,\nC_n = 4ηℓ ∑_i √(a²_i + b²_i)  (D12)\nwhere ℓ ≈ 6 µm is the sampling cell side length, and η ≈ 350 aF/µm is the wire capacitance per unit length in our process, see Fig. 11 (b). a_i and b_i are the x and y components of the ith connection rule, as described in section C.2.", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D12" }, { - "attribute": "type", + "name": "type", "value": "capacitance_estimation" }, { - "attribute": "parameters", + "name": "parameters", "value": "ℓ=6µm, η=350aF/µm" } ] @@ -14899,15 +14899,15 @@ "content": "Several systems on the chip require signals to be transmitted from some central location out to the individual sampling cells. This communication involves sending signals over long wires with a large capacitance, which is energetically expensive. Here, the cost of this global communication will be taken into consideration.\na. Clocking\nAlthough it is possible in principle to implement Gibbs sampling completely asynchronously, in practice, it is more efficient to implement standard chromatic Gibbs sampling with a global clock. A global clock requires a signal to be distributed from a central clock circuit to every sampling cell on the chip. This signal distribution is typically accomplished using a clock tree, a branching circuit designed to minimize timing inconsistencies between disparate circuit elements.\nTo simplify the analysis, we will consider a simple clock distribution scheme in which the clock is distributed by lines that run the entire length of each row in the grid. The total length of the wires used for clock distribution in this scheme is,\nL_clock = N L  (D13)", "attributes": [ { - "attribute": "equation", + "name": "equation", "value": "D13" }, { - "attribute": "type", + "name": "type", "value": "clock_distribution" }, { - "attribute": "scheme", + "name": "scheme", "value": "row-based" } ] @@ -14918,15 +14918,15 @@ "content": "Section D.1 discusses a simple circuit that uses resistors to implement the multiply-accumulate required by the conditional update rule. Key to this is being able to tune the conductance of the resistors to implement specific sets of weights and biases (see Eq. (D7)).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.1" }, { - "attribute": "component", + "name": "component", "value": "resistor circuit" }, { - "attribute": "function", + "name": "function", "value": "multiply-accumulate" } ] @@ -14937,15 +14937,15 @@ "content": "Practically, implementing this tunability requires that the model parameters be stored in memory somewhere on the chip. Writing to and maintaining these memories costs energy.
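Plugging the quoted constants into Eqs. (D11)-(D12) gives a feel for the scales involved. In this sketch only ℓ ≈ 6 µm, η ≈ 350 aF/µm, and the Table I connection rules come from the text; the signaling voltage is a hypothetical placeholder:

```python
import math

eta = 350e-18          # wire capacitance per unit length, 350 aF per um
cell = 6.0             # sampling cell side length, um
v_sig = 0.5            # hypothetical signaling voltage (not quoted), V

patterns = {
    "G8":  [(0, 1), (4, 1)],
    "G12": [(0, 1), (4, 1), (9, 10)],
    "G16": [(0, 1), (4, 1), (8, 7), (14, 9)],
}

for name, rules in patterns.items():
    # C_n = 4 * eta * l * sum_i sqrt(a_i^2 + b_i^2), Eq. (D12)
    c_n = 4 * eta * cell * sum(math.hypot(a, b) for a, b in rules)
    e_charge = 0.5 * c_n * v_sig**2          # Eq. (D11), per transition
    print(f"{name}: C_n = {c_n*1e15:.2f} fF, E_charge = {e_charge*1e18:.1f} aJ")
```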
Writing to the memories uses much more energy than maintaining the state.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.1" }, { - "attribute": "memory_type", + "name": "memory_type", "value": "on-chip" }, { - "attribute": "energy_comparison", + "name": "energy_comparison", "value": "writing >> maintenance" } ] @@ -14956,15 +14956,15 @@ "content": "However, if writes are infrequent (program the device once and then run many sampling programs on it before writing again), then the overall cost of the memory is dominated by maintenance. Luckily, most conventional memories are specifically designed to consume as little energy as possible when not being accessed.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.1" }, { - "attribute": "usage_pattern", + "name": "usage_pattern", "value": "infrequent writes" }, { - "attribute": "cost_dominance", + "name": "cost_dominance", "value": "maintenance dominates" } ] @@ -14975,15 +14975,15 @@ "content": "As such, in practice, the cost of memory maintenance is small compared to the other costs associated with the sampling cells and does not significantly change the outcome shown in Fig. 12.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.1" }, { - "attribute": "impact_level", + "name": "impact_level", "value": "system level" }, { - "attribute": "cost_significance", + "name": "cost_significance", "value": "negligible" } ] @@ -14994,15 +14994,15 @@ "content": "The cost of this communication depends strongly on the tightness of integration between the two systems and is impossible to reason about at an abstract level. As such, the analysis of communication here (as in Section D3b) was limited to the cost of getting bits out to the edge of our chip, which is a lower bound on the actual cost.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.2" }, { - "attribute": "analysis_scope", + "name": "analysis_scope", "value": "chip edge only" }, { - "attribute": "bound_type", + "name": "bound_type", "value": "lower bound" } ] @@ -15013,15 +15013,15 @@ "content": "However, we have found that a more detailed analysis, which includes the cost of communication between two chips mediated by a PCB, does not significantly change the results at the system level.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.2" }, { - "attribute": "analysis_detail", + "name": "analysis_detail", "value": "PCB-mediated" }, { - "attribute": "result_impact", + "name": "result_impact", "value": "no significant change" } ] @@ -15032,15 +15032,15 @@ "content": "The fundamental reason for this is that sampling programs for complex models run for many iterations before mixing and sending the results back to the outside world. 
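A toy calculation of that amortization argument. Only the ~350 aJ/bit RNG figure comes from the text; the grid size, sweep count, and the one-pass I/O model are illustrative guesses:

```python
n_cells = 100 * 100        # hypothetical grid size
n_sweeps = 10_000          # hypothetical sweeps before the chain mixes
e_bit = 350e-18            # energy per random bit, from the measurements

e_samp = n_cells * n_sweeps * e_bit    # stand-in for E_samp
e_io = 2 * n_cells * e_bit             # stand-in for E_init + E_read
print(f"E_samp ~ {e_samp*1e9:.1f} nJ, E_init+E_read ~ {e_io*1e12:.2f} pJ")
print(f"ratio ~ {e_samp/e_io:.0f}x")   # sampling dominates by ~n_sweeps/2
```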
@@ -15032,15 +15032,15 @@ "content": "The fundamental reason for this is that sampling programs for complex models run for many iterations before mixing and sending the results back to the outside world. This is reflected in the discrepancy between E_samp and E_init + E_read found in section D.4.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.2" }, { - "attribute": "program_behavior", + "name": "program_behavior", "value": "many iterations" }, { - "attribute": "metric_discrepancy", + "name": "metric_discrepancy", "value": "E_samp vs E_init + E_read" } ]
@@ -15051,15 +15051,15 @@ "content": "Due to the heterogeneity of our architecture, it is possible to share most of the supporting circuitry among many sampling cells, which dramatically reduces the per-cell cost.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.3" }, { - "attribute": "architecture_type", + "name": "architecture_type", "value": "heterogeneous" }, { - "attribute": "cost_reduction", + "name": "cost_reduction", "value": "dramatic per-cell reduction" } ]
@@ -15070,15 +15070,15 @@ "content": "As such, the energy cost of the supporting circuitry is not significant at the system level.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "D.3" }, { - "attribute": "cost_level", + "name": "cost_level", "value": "system level" }, { - "attribute": "significance", + "name": "significance", "value": "insignificant" } ]
@@ -15089,15 +15089,15 @@ "content": "All experiments shown in Fig. 1 in the article were conducted on NVIDIA A100 GPUs. The empirical energy estimates were obtained by drawing a batch of samples from the model and measuring the GPU energy consumption and time via Zeus [5]. The theoretical energy estimates were derived by taking the number of model FLOPs (via JAX and PyTorch's internal estimators) and plugging them into the NVIDIA GPU specifications (19.5 TFLOPS for Float32 and 400 W).", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix E" }, { - "attribute": "hardware", + "name": "hardware", "value": "NVIDIA A100 GPU" }, { - "attribute": "measurement_method", + "name": "measurement_method", "value": "Zeus + FLOPs estimation" } ]
@@ -15108,15 +15108,15 @@ "content": "The empirical measurements are compared to theoretical estimates for the VAE in Table II, and the empirical measurements show good alignment with the theoretical estimates.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix E" }, { - "attribute": "model_type", + "name": "model_type", "value": "VAE" }, { - "attribute": "alignment", + "name": "alignment", "value": "good empirical-theoretical" } ]
@@ -15127,15 +15127,15 @@ "content": "FID | Empirical Efficiency | Theoretical Efficiency\n30.5 | 6.1×10⁻⁵ | 2.3×10⁻⁵\n27.4 | 1.5×10⁻⁴ | 0.4×10⁻⁴\n17.9 | 2.5×10⁻³ | 1.7×10⁻³", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix E" }, { - "attribute": "data_type", + "name": "data_type", "value": "energy efficiency" }, { - "attribute": "units", + "name": "units", "value": "joules per sample" } ]
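The theoretical estimate described above reduces to two lines of arithmetic: time = FLOPs / peak throughput, then energy = time × board power. A minimal Python sketch follows, using the A100 figures quoted in the text; the per-sample FLOP count in the example is a hypothetical placeholder.

PEAK_FLOPS = 19.5e12   # A100 Float32 peak throughput, FLOP/s (from the text)
BOARD_POWER = 400.0    # A100 board power, W (from the text)

def energy_per_sample(model_flops):
    # Joules per generated sample under the peak-throughput assumption:
    # (FLOPs / throughput) gives seconds, times watts gives joules.
    return (model_flops / PEAK_FLOPS) * BOARD_POWER

print(energy_per_sample(1e9))   # e.g. a 1 GFLOP decoder pass -> ~2e-2 J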
@@ -15146,15 +15146,15 @@ "content": "The models were derived from available implementations and are based on ResNet [6] and UNet [7] style architectures. Their FID performance is consistent with published literature values [8–10]. The goal is not to achieve state-of-the-art performance, but to represent the relative scales of energy consumption of the algorithms.", "attributes": [ { - "attribute": "section", + "name": "section", "value": "Appendix E" }, { - "attribute": "architecture_style", + "name": "architecture_style", "value": "ResNet/UNet" }, { - "attribute": "research_focus", + "name": "research_focus", "value": "relative energy scales" } ]
@@ -15165,15 +15165,15 @@ "content": "The reader may be surprised to see that the diffusion model is substantially less energy-efficient than the VAE, given the relative dominance of diffusion models in image generation. However, two points should be kept in mind. First, while the VAE remains a semi-competitive model on these smaller datasets, this competitiveness quickly breaks down: on larger datasets, a FID performance gap usually exists between diffusion models and VAEs. Second, these diffusion models (based on the original DDPM [2]) have performance that can depend on the number of diffusion time steps. So, not only is the UNet model often larger than a VAE decoder, but it must also be run dozens to thousands of times to generate a single sample (resulting in multiple orders of magnitude more energy). Modern improvements, such as distillation [11], may move the diffusion model's energy efficiency closer to the VAE's.", "attributes": [ { - "attribute": "model_comparison", + "name": "model_comparison", "value": "diffusion_models_vs_vae" }, { - "attribute": "energy_consumption", + "name": "energy_consumption", "value": "multiple_orders_of_magnitude" }, { - "attribute": "technical_detail", + "name": "technical_detail", "value": "UNet_vs_VAE_decoder" } ]
@@ -15184,11 +15184,11 @@ "content": "Modern improvements, such as distillation [11], may move the diffusion model's energy efficiency closer to the VAE's.", "attributes": [ { - "attribute": "improvement_suggestion", + "name": "improvement_suggestion", "value": "distillation" }, { - "attribute": "optimistic_outlook", + "name": "optimistic_outlook", "value": "positive" } ]
@@ -15199,11 +15199,11 @@ "content": "The total correlation penalty is a convenient choice in this context because its gradients can be computed using the same samples used to estimate the gradient of the usual training loss, ∇_θ L_DN.", "attributes": [ { - "attribute": "computational_efficiency", + "name": "computational_efficiency", "value": "sample_reuse" }, { - "attribute": "optimization", + "name": "optimization", "value": "gradient_computation" } ]
@@ -15214,15 +15214,15 @@ "content": "To address this, we employ an Adaptive Correlation Penalty (ACP) scheme that dynamically adjusts λ_t based on an estimate of the model's current mixing time. We use the autocorrelation of the Gibbs sampling chain, r^t_yy, as a proxy for mixing, as described in Section H and the main text, Eq. 18.", "attributes": [ { - "attribute": "method_name", + "name": "method_name", "value": "ACP" }, { - "attribute": "control_parameter", + "name": "control_parameter", "value": "λ_t" }, { - "attribute": "proxy_metric", + "name": "proxy_metric", "value": "autocorrelation" } ]
@@ -15233,11 +15233,11 @@ "content": "A simple layerwise procedure is used for this control. The inputs to the algorithm are the initial values of λ_t, a target autocorrelation threshold ε_ACP (e.g., 0.03), an update factor δ_ACP (e.g., 0.2), and a lower limit λ_t^min (e.g., 0.0001).\nAt the end of each training epoch m:\n1. Estimate the current autocorrelation a_m^t = r^t_yy[K]. This estimate can be done by running a longer Gibbs chain periodically and calculating the empirical autocorrelation from the samples.\n2. Set λ′_t = max(λ_t^min, λ_t^(m)) to avoid getting stuck at 0.\n3. Update λ_t for the next epoch (m+1) based on a_m^t and the previous value a_{m−1}^t (if m > 0):\n• If a_m^t < ε_ACP: the chain mixes sufficiently fast; reduce the penalty slightly: λ_t^(m+1) ← (1 − δ_ACP) λ′_t\n• Else if a_m^t ≥ ε_ACP and a_m^t ≤ a_{m−1}^t (or m = 0): mixing is slow but not worsening (or at baseline); keep the penalty strength: λ_t^(m+1) ← λ′_t\n• Else (a_m^t > ε_ACP and a_m^t > a_{m−1}^t): mixing is slow and worsening; increase the penalty: λ_t^(m+1) ← (1 + δ_ACP) λ′_t\n4. If the proposed value λ_t^(m+1) < λ_t^min, then set λ_t^(m+1) ← 0.", "attributes": [ { - "attribute": "algorithm_complexity", + "name": "algorithm_complexity", "value": "layerwise_procedure" }, { - "attribute": "update_logic", + "name": "update_logic", "value": "conditional_adjustments" } ]
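The update rule in steps 1-4 is simple enough to state directly in code. Here is a minimal Python sketch of one ACP update, assuming the autocorrelation estimate of step 1 is computed elsewhere; the default values mirror the example values in the text (ε_ACP = 0.03, δ_ACP = 0.2, λ_t^min = 0.0001).

def acp_update(lam, a_curr, a_prev, eps_acp=0.03, delta_acp=0.2, lam_min=1e-4):
    # Return lambda for epoch m+1 given the current penalty lam = λ_t^(m)
    # and the autocorrelation estimates a_m (a_curr) and a_{m-1}
    # (a_prev, None when m = 0).
    lam_p = max(lam_min, lam)                 # step 2: avoid sticking at 0
    if a_curr < eps_acp:                      # mixing fast: relax the penalty
        lam_next = (1 - delta_acp) * lam_p
    elif a_prev is None or a_curr <= a_prev:  # slow but not worsening: hold
        lam_next = lam_p
    else:                                     # slow and worsening: strengthen
        lam_next = (1 + delta_acp) * lam_p
    return 0.0 if lam_next < lam_min else lam_next  # step 4

# Example: autocorrelation above threshold and rising -> penalty increases.
print(acp_update(lam=0.01, a_curr=0.05, a_prev=0.04))   # -> 0.012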
@@ -15248,11 +15248,11 @@ "content": "Our experiments indicate that this simple feedback mechanism works effectively. While λ_t and the autocorrelation a_m^t might exhibit some damped oscillations for several epochs before stabilizing, this automated procedure is vastly more efficient than performing manual hyperparameter searches for λ_t for each of the T models.", "attributes": [ { - "attribute": "efficiency_claim", + "name": "efficiency_claim", "value": "vastly_more_efficient" }, { - "attribute": "automation_benefit", + "name": "automation_benefit", "value": "reduced_manual_tuning" } ]
@@ -15263,11 +15263,11 @@ "content": "Training is relatively insensitive to the exact choice of ε_ACP within a reasonable range (e.g., [0.02, 0.1]) and δ_ACP (e.g., [0.1, 0.3]). Assuming that over the course of training the λ_t parameter settles around some value λ*_t, one should aim for the lower-bound parameter λ_t^min to be smaller than (1/2)λ*_t, while making sure that the ramp-up time (log(λ*_t) − log(λ_t^min)) / log(1 + δ_ACP) remains small. Settings of λ_t^min in the range [0.00001, 0.001] all produced largely the same result, the only difference being that values at the lower end of that range led to larger-amplitude oscillations of λ_t and a_m^t, but training eventually settled for all values.", "attributes": [ { - "attribute": "hyperparameter_robustness", + "name": "hyperparameter_robustness", "value": "wide_range_tolerance" }, { - "attribute": "parameter_ranges", + "name": "parameter_ranges", "value": "ε_ACP [0.02, 0.1], δ_ACP [0.1, 0.3], λ_min [0.00001, 0.001]" } ]
@@ -15278,11 +15278,11 @@ "content": "In some of our experiments, we needed to embed continuous data into binary variables. We chose to do this by representing a k-state categorical variable X_i using the sum of k binary variables Z_i^(k),\nX_i = Σ_{k=1}^{K_i} Z_i^(k) (G1)\nwhere Z_i^(k) ∈ {0,1}.", "attributes": [ { - "attribute": "embedding_technique", + "name": "embedding_technique", "value": "binary_representation" }, { - "attribute": "variable_type", + "name": "variable_type", "value": "categorical_to_binary" } ]
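A minimal sketch of the embedding in Eq. (G1). The "first-x-bits" encoder below is one arbitrary choice among the many bit patterns with the same sum; the text only constrains the decoding (the sum), not the encoding.

def encode(x, k):
    # One arbitrary encoding: set the first x of k binary variables to 1.
    # Any permutation works, since Eq. (G1) only constrains the sum.
    assert 0 <= x <= k
    return [1] * x + [0] * (k - x)

def decode(z):
    # Eq. (G1): X_i = sum over k of Z_i^(k)
    return sum(z)

z = encode(3, k=5)       # [1, 1, 1, 0, 0]
assert decode(z) == 3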
@@ -15293,15 +15293,15 @@ "content": "The reader may be surprised to see that the diffusion model is substantially less energy-efficient than the VAE, given the relative dominance of diffusion models in image generation. However, two points should be kept in mind. First, while the VAE remains a semi-competitive model on these smaller datasets, this competitiveness quickly breaks down: on larger datasets, a FID performance gap usually exists between diffusion models and VAEs. Second, these diffusion models (based on the original DDPM [2]) have performance that can depend on the number of diffusion time steps. So, not only is the UNet model often larger than a VAE decoder, but it must also be run dozens to thousands of times to generate a single sample (resulting in multiple orders of magnitude more energy). Modern improvements, such as distillation [11], may move the diffusion model's energy efficiency closer to the VAE's.", "attributes": [ { - "attribute": "document_type", + "name": "document_type", "value": "research_paper" }, { - "attribute": "field", + "name": "field", "value": "machine_learning" }, { - "attribute": "focus_area", + "name": "focus_area", "value": "diffusion_models_and_regularization" } ]