last commit
parent
feab6c8c0e
commit
ae145e2e76
|
@ -0,0 +1,241 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="AutoImportSettings">
|
||||
<option name="autoReloadType" value="SELECTIVE" />
|
||||
</component>
|
||||
<component name="BackendCodeEditorMiscSettings">
|
||||
<option name="/Default/RiderDebugger/RiderRestoreDecompile/RestoreDecompileSetting/@EntryValue" value="false" type="bool" />
|
||||
<option name="/Default/Housekeeping/GlobalSettingsUpgraded/IsUpgraded/@EntryValue" value="true" type="bool" />
|
||||
<option name="/Default/Housekeeping/FeatureSuggestion/FeatureSuggestionManager/DisabledSuggesters/=SwitchToGoToActionSuggester/@EntryIndexedValue" value="true" type="bool" />
|
||||
<option name="/Default/Environment/Hierarchy/GeneratedFilesCacheKey/Timestamp/@EntryValue" value="64" type="long" />
|
||||
<option name="/Default/Housekeeping/OptionsDialog/SelectedPageId/@EntryValue" value="CppFormatterOtherPage" type="string" />
|
||||
<option name="/Default/Housekeeping/Search/HighlightUsagesHintUsed/@EntryValue" value="true" type="bool" />
|
||||
<option name="/Default/Housekeeping/FeatureSuggestion/FeatureSuggestionManager/DisabledSuggesters/=SwitchToGoToActionSuggester/@EntryIndexRemoved" />
|
||||
</component>
|
||||
<component name="CMakePresetLoader">{
|
||||
"useNewFormat": true
|
||||
}</component>
|
||||
<component name="CMakeProjectFlavorService">
|
||||
<option name="flavorId" value="CMakePlainProjectFlavor" />
|
||||
</component>
|
||||
<component name="CMakeReloadState">
|
||||
<option name="reloaded" value="true" />
|
||||
</component>
|
||||
<component name="CMakeRunConfigurationManager">
|
||||
<generated>
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="COSC-4P80-Final-Project" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="glfw" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="uninstall" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="BLT_WITH_GRAPHICS" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="BLT" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="freetype" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="opennn" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="dlib" />
|
||||
<config projectName="COSC-4P80-Final-Project" targetName="update_mappings" />
|
||||
</generated>
|
||||
</component>
|
||||
<component name="CMakeSettings">
|
||||
<configurations>
|
||||
<configuration PROFILE_NAME="Debug" ENABLED="true" CONFIG_NAME="Debug" />
|
||||
<configuration PROFILE_NAME="Release" ENABLED="true" CONFIG_NAME="Release" />
|
||||
<configuration PROFILE_NAME="RelWithDebInfo" ENABLED="true" CONFIG_NAME="RelWithDebInfo" />
|
||||
<configuration PROFILE_NAME="RelWithDebInfo Addrsan" ENABLED="true" CONFIG_NAME="RelWithDebInfo" GENERATION_OPTIONS="-DENABLE_ADDRSAN=ON -DENABLE_UBSAN=ON" />
|
||||
</configurations>
|
||||
</component>
|
||||
<component name="ChangeListManager">
|
||||
<list default="true" id="9c238110-7b79-4fb8-a517-1a6ad61b867f" name="Changes" comment="">
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/CMakeLists.txt" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/CMakeLists.txt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/build_emscript.sh" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/build_emscript.sh" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/cloc.sh" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/cloc.sh" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/commit.py" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/commit.py" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/libraries/BLT" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/libraries/BLT" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/lib/blt-with-graphics/resources/fonts/a.out" beforeDir="false" afterPath="$PROJECT_DIR$/lib/blt-with-graphics/resources/fonts/a.out" afterDir="false" />
|
||||
</list>
|
||||
<option name="SHOW_DIALOG" value="false" />
|
||||
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
||||
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
||||
<option name="LAST_RESOLUTION" value="IGNORE" />
|
||||
</component>
|
||||
<component name="ClangdSettings">
|
||||
<option name="formatViaClangd" value="false" />
|
||||
</component>
|
||||
<component name="ExecutionTargetManager" SELECTED_TARGET="CMakeBuildProfile:Release" />
|
||||
<component name="Git.Settings">
|
||||
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
|
||||
</component>
|
||||
<component name="HighlightingSettingsPerFile">
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
<setting file="mock:///dummy.cpp" root0="SKIP_HIGHLIGHTING" />
|
||||
</component>
|
||||
<component name="ProjectApplicationVersion">
|
||||
<option name="ide" value="CLion" />
|
||||
<option name="majorVersion" value="2024" />
|
||||
<option name="minorVersion" value="3" />
|
||||
<option name="productBranch" value="Classic" />
|
||||
</component>
|
||||
<component name="ProjectColorInfo">{
|
||||
"associatedIndex": 0
|
||||
}</component>
|
||||
<component name="ProjectId" id="2pxLBGwjdrQBWWQcqWdqZJ2ET2e" />
|
||||
<component name="ProjectViewState">
|
||||
<option name="hideEmptyMiddlePackages" value="true" />
|
||||
<option name="showLibraryContents" value="true" />
|
||||
</component>
|
||||
<component name="PropertiesComponent">{
|
||||
"keyToString": {
|
||||
"CMake Application.COSC-4P80-Final-Project.executor": "Run",
|
||||
"RunOnceActivity.RadMigrateCodeStyle": "true",
|
||||
"RunOnceActivity.ShowReadmeOnStart": "true",
|
||||
"RunOnceActivity.cidr.known.project.marker": "true",
|
||||
"RunOnceActivity.readMode.enableVisualFormatting": "true",
|
||||
"RunOnceActivity.west.config.association.type.startup.service": "true",
|
||||
"SHARE_PROJECT_CONFIGURATION_FILES": "true",
|
||||
"cf.first.check.clang-format": "false",
|
||||
"cidr.known.project.marker": "true",
|
||||
"git-widget-placeholder": "main",
|
||||
"last_opened_file_path": "/home/brett/Documents/Brock/CS 4P80/COSC-4P80-Final-Project",
|
||||
"node.js.detected.package.eslint": "true",
|
||||
"node.js.detected.package.tslint": "true",
|
||||
"node.js.selected.package.eslint": "(autodetect)",
|
||||
"node.js.selected.package.tslint": "(autodetect)",
|
||||
"nodejs_package_manager_path": "npm",
|
||||
"settings.editor.selected.configurable": "preferences.lookFeel",
|
||||
"vue.rearranger.settings.migration": "true"
|
||||
}
|
||||
}</component>
|
||||
<component name="RunManager" selected="CMake Application.COSC-4P80-Final-Project">
|
||||
<configuration name="BLT" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="BLT" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="BLT_WITH_GRAPHICS" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="BLT_WITH_GRAPHICS" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="COSC-4P80-Final-Project" type="CMakeRunConfiguration" factoryName="Application" PROGRAM_PARAMS="part2" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="COSC-4P80-Final-Project" CONFIG_NAME="Debug" RUN_TARGET_PROJECT_NAME="COSC-4P80-Final-Project" RUN_TARGET_NAME="COSC-4P80-Final-Project">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="dlib" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="dlib" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="freetype" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="freetype" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="glfw" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="glfw" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="uninstall" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="uninstall" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration name="update_mappings" type="CMakeRunConfiguration" factoryName="Application" REDIRECT_INPUT="false" ELEVATE="false" USE_EXTERNAL_CONSOLE="false" EMULATE_TERMINAL="false" PASS_PARENT_ENVS_2="true" PROJECT_NAME="COSC-4P80-Final-Project" TARGET_NAME="update_mappings" CONFIG_NAME="Debug">
|
||||
<method v="2">
|
||||
<option name="com.jetbrains.cidr.execution.CidrBuildBeforeRunTaskProvider$BuildBeforeRunTask" enabled="true" />
|
||||
</method>
|
||||
</configuration>
|
||||
<configuration default="true" type="MAKEINDEX_RUN_CONFIGURATION" factoryName="LaTeX configuration factory">
|
||||
<texify-makeindex>
|
||||
<program>MAKEINDEX</program>
|
||||
<main-file />
|
||||
<command-line-args />
|
||||
<work-dir />
|
||||
</texify-makeindex>
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<list>
|
||||
<item itemvalue="CMake Application.BLT_WITH_GRAPHICS" />
|
||||
<item itemvalue="CMake Application.BLT" />
|
||||
<item itemvalue="CMake Application.COSC-4P80-Final-Project" />
|
||||
<item itemvalue="CMake Application.dlib" />
|
||||
<item itemvalue="CMake Application.freetype" />
|
||||
<item itemvalue="CMake Application.uninstall" />
|
||||
<item itemvalue="CMake Application.glfw" />
|
||||
<item itemvalue="CMake Application.update_mappings" />
|
||||
</list>
|
||||
</component>
|
||||
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
|
||||
<component name="TaskManager">
|
||||
<task active="true" id="Default" summary="Default task">
|
||||
<changelist id="9c238110-7b79-4fb8-a517-1a6ad61b867f" name="Changes" comment="" />
|
||||
<created>1733702642308</created>
|
||||
<option name="number" value="Default" />
|
||||
<option name="presentableId" value="Default" />
|
||||
<updated>1733702642308</updated>
|
||||
<workItem from="1733702643366" duration="34000" />
|
||||
<workItem from="1733702709776" duration="35920000" />
|
||||
<workItem from="1733851235937" duration="19449000" />
|
||||
<workItem from="1733939842723" duration="14770000" />
|
||||
<workItem from="1734029532042" duration="137000" />
|
||||
<workItem from="1734403691061" duration="3000" />
|
||||
<workItem from="1735592453031" duration="11224000" />
|
||||
<workItem from="1736192324957" duration="355000" />
|
||||
<workItem from="1736204332671" duration="5499000" />
|
||||
<workItem from="1736295645857" duration="5415000" />
|
||||
<workItem from="1736362779013" duration="9601000" />
|
||||
<workItem from="1736446032240" duration="11517000" />
|
||||
</task>
|
||||
<servers />
|
||||
</component>
|
||||
<component name="TypeScriptGeneratedFilesManager">
|
||||
<option name="version" value="3" />
|
||||
</component>
|
||||
<component name="VCPKGProject">
|
||||
<isAutomaticCheckingOnLaunch value="false" />
|
||||
<isAutomaticFoundErrors value="true" />
|
||||
<isAutomaticReloadCMake value="true" />
|
||||
</component>
|
||||
<component name="XSLT-Support.FileAssociations.UIState">
|
||||
<expand />
|
||||
<select />
|
||||
</component>
|
||||
</project>
|
|
@ -0,0 +1,44 @@
|
|||
cmake_minimum_required(VERSION 3.25)
|
||||
project(COSC-4P80-Final-Project VERSION 0.0.26)
|
||||
|
||||
option(ENABLE_ADDRSAN "Enable the address sanitizer" OFF)
|
||||
option(ENABLE_UBSAN "Enable the ub sanitizer" OFF)
|
||||
option(ENABLE_TSAN "Enable the thread data race sanitizer" OFF)
|
||||
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
|
||||
add_subdirectory(lib/blt-with-graphics)
|
||||
|
||||
add_compile_options("-fopenmp")
|
||||
add_link_options("-fopenmp")
|
||||
|
||||
include(FetchContent)
fetchcontent_declare(dlib
|
||||
URL http://dlib.net/files/dlib-19.24.tar.bz2
|
||||
URL_HASH MD5=8a98957a73eebd3cd7431c2bac79665f
|
||||
FIND_PACKAGE_ARGS)
|
||||
fetchcontent_makeavailable(dlib)
|
||||
|
||||
include_directories(include/)
|
||||
file(GLOB_RECURSE PROJECT_BUILD_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
|
||||
|
||||
add_executable(COSC-4P80-Final-Project ${PROJECT_BUILD_FILES})
|
||||
|
||||
target_compile_options(COSC-4P80-Final-Project PRIVATE -Wall -Wextra -Wpedantic -Wno-comment)
|
||||
target_link_options(COSC-4P80-Final-Project PRIVATE -Wall -Wextra -Wpedantic -Wno-comment)
|
||||
|
||||
target_link_libraries(COSC-4P80-Final-Project PRIVATE BLT_WITH_GRAPHICS dlib)
|
||||
|
||||
if (${ENABLE_ADDRSAN} MATCHES ON)
|
||||
target_compile_options(COSC-4P80-Final-Project PRIVATE -fsanitize=address)
|
||||
target_link_options(COSC-4P80-Final-Project PRIVATE -fsanitize=address)
|
||||
endif ()
|
||||
|
||||
if (${ENABLE_UBSAN} MATCHES ON)
|
||||
target_compile_options(COSC-4P80-Final-Project PRIVATE -fsanitize=undefined)
|
||||
target_link_options(COSC-4P80-Final-Project PRIVATE -fsanitize=undefined)
|
||||
endif ()
|
||||
|
||||
if (${ENABLE_TSAN} MATCHES ON)
|
||||
target_compile_options(COSC-4P80-Final-Project PRIVATE -fsanitize=thread)
|
||||
target_link_options(COSC-4P80-Final-Project PRIVATE -fsanitize=thread)
|
||||
endif ()
|
|
@ -1,5 +1,5 @@
|
|||
cmake_minimum_required(VERSION 3.25)
|
||||
project(COSC-4P80-Final-Project VERSION 0.0.26)
|
||||
project(COSC-4P80-Final-Project VERSION 0.0.30)
|
||||
|
||||
option(ENABLE_ADDRSAN "Enable the address sanitizer" OFF)
|
||||
option(ENABLE_UBSAN "Enable the ub sanitizer" OFF)
|
||||
|
|
|
@ -24,8 +24,8 @@ USER_HOME = Path.home()
|
|||
ENVIRONMENT_DATA_LOCATION = USER_HOME / ".brett_scripts.env"
|
||||
|
||||
if sys.platform.startswith("win"):
|
||||
CONFIG_FILE_DIRECTORY = Path(os.getenv('APPDATA') + "\BLT")
|
||||
CONFIG_FILE_LOCATION = Path(CONFIG_FILE_DIRECTORY + "\commit_config.env")
|
||||
CONFIG_FILE_DIRECTORY = Path(os.getenv('APPDATA') + "\\BLT")
|
||||
CONFIG_FILE_LOCATION = CONFIG_FILE_DIRECTORY / "commit_config.env"
|
||||
else:
|
||||
XDG_CONFIG_HOME = os.environ.get('XDG_CONFIG_HOME')
|
||||
if XDG_CONFIG_HOME is None:
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
import sys
|
||||
|
||||
def plot_stacked_graph(title, output, csv_file1, csv_file2, name1, name2, position, position2):
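"""Plot two two-column CSV series on one pair of axes and save the figure to `output`.

The first CSV's header row provides the axis labels; `name1` and `name2` label the
two plotted series. `position` and `position2` draw dashed vertical markers at the
average number of epochs to convergence; passing a value of 2**32 or larger acts
as a sentinel meaning "no marker".
"""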
|
||||
# Read CSV files
|
||||
data1 = pd.read_csv(csv_file1, header=0)
|
||||
data2 = pd.read_csv(csv_file2, header=0)
|
||||
|
||||
# Extract column titles
|
||||
x1_label, y1_label = data1.columns[0], data1.columns[1]
|
||||
x2_label, y2_label = data2.columns[0], data2.columns[1]
|
||||
|
||||
# Extract data
|
||||
x1, y1 = data1[x1_label], data1[y1_label]
|
||||
x2, y2 = data2[x2_label], data2[y2_label]
|
||||
|
||||
# Create the plot
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
line1, = ax.plot(x1, y1, label=f"{name1}")
|
||||
line2, = ax.plot(x2, y2, label=f"{name2}")
|
||||
|
||||
if position < 2 ** 32:
|
||||
ax.axvline(x=position, color='red', linestyle='--')
|
||||
ax.text(position, ax.get_ylim()[1] * 0.95, f"Feed-forward average\n # of epochs ({position})", color=line2.get_color(), fontsize=10, ha='right', va='top', backgroundcolor='white')
|
||||
if position2 < 2 ** 32:
|
||||
ax.axvline(x=position2, color='red', linestyle='--')
|
||||
ax.text(position2, ax.get_ylim()[1] * 0.75, f"Deep learning average\n # of epochs ({position2})", color=line1.get_color(), fontsize=10, ha='right', va='top', backgroundcolor='white')
|
||||
|
||||
ax.set_xlabel(x1_label)
|
||||
ax.set_ylabel(y1_label)
|
||||
ax.legend()
|
||||
ax.set_title(title, fontsize=12)
|
||||
|
||||
plt.savefig(output)
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) != 9:
|
||||
print("Usage: python script.py <title> <output_file> <csv_file1> <csv_file2> <csv1_name> <csv2_name> <pos1> <pos2>")
|
||||
sys.exit(1)
|
||||
|
||||
csv_file1 = sys.argv[3]
|
||||
csv_file2 = sys.argv[4]
|
||||
title = sys.argv[1]
|
||||
output = sys.argv[2]
|
||||
name1 = sys.argv[5]
|
||||
name2 = sys.argv[6]
|
||||
|
||||
plot_stacked_graph(title, output, csv_file1, csv_file2, name1, name2, int(sys.argv[7]), int(sys.argv[8]))
|
|
@ -1,2 +1,2 @@
|
|||
\chapter*{Abstract}
|
||||
Deep Learning folds feature extraction into the traditional neural network architecture. This paper will use the MNIST handwritten digit dataset to show the benefit in using deep learning techniques.
|
||||
Deep learning integrates feature extraction directly into the traditional neural network architecture, improving overall performance. This paper explores the benefits of deep learning using the MNIST handwritten digit dataset. We compare two network configurations: one with feature extraction followed by feed-forward classification, and one with the feed-forward classifier alone. Our results demonstrate that as the size of the feed-forward network is reduced, its classification performance decreases. However, by leveraging feature extraction, we are able to retain classification power, showing the value of deep learning in improving performance with smaller networks.
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\@setckpt{chapters/conclusion}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
\setcounter{enumiii}{0}
|
||||
\setcounter{enumiv}{0}
|
||||
\setcounter{footnote}{0}
|
||||
\setcounter{mpfootnote}{0}
|
||||
\setcounter{part}{0}
|
||||
\setcounter{chapter}{2}
|
||||
\setcounter{section}{2}
|
||||
\setcounter{subsection}{0}
|
||||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
\setcounter{subparagraph}{0}
|
||||
\setcounter{figure}{0}
|
||||
\setcounter{table}{0}
|
||||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
\setcounter{float@type}{8}
|
||||
\setcounter{algorithm}{0}
|
||||
\setcounter{ALG@line}{0}
|
||||
\setcounter{ALG@rem}{0}
|
||||
\setcounter{ALG@nested}{0}
|
||||
\setcounter{ALG@Lnr}{2}
|
||||
\setcounter{ALG@blocknr}{10}
|
||||
\setcounter{ALG@storecount}{0}
|
||||
\setcounter{ALG@tmpcounter}{0}
|
||||
\setcounter{LT@tables}{0}
|
||||
\setcounter{LT@chunks}{0}
|
||||
\setcounter{section@level}{4}
|
||||
\setcounter{Item}{0}
|
||||
\setcounter{Hfootnote}{0}
|
||||
\setcounter{bookmark@seq@number}{5}
|
||||
}
|
|
@ -1,18 +1,27 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\bibstyle{plain}
|
||||
\bibdata{refs}
|
||||
\@writefile{toc}{\contentsline {chapter}{\numberline {4}Conclusion}{9}{chapter.4}\protected@file@percent }
|
||||
\@writefile{lof}{\addvspace {10\p@ }}
|
||||
\@writefile{lot}{\addvspace {10\p@ }}
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{9}{section*.26}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{9}{section*.27}\protected@file@percent }
|
||||
\bibcite{dlib09}{1}
|
||||
\citation{dlib09}
|
||||
\@setckpt{chapters/conclusion}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{page}{11}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
\setcounter{enumiii}{0}
|
||||
\setcounter{enumiv}{0}
|
||||
\setcounter{enumiv}{1}
|
||||
\setcounter{footnote}{0}
|
||||
\setcounter{mpfootnote}{0}
|
||||
\setcounter{part}{0}
|
||||
\setcounter{chapter}{2}
|
||||
\setcounter{section}{2}
|
||||
\setcounter{chapter}{4}
|
||||
\setcounter{section}{0}
|
||||
\setcounter{subsection}{0}
|
||||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
|
@ -22,7 +31,7 @@
|
|||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{caption@flags}{2}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
|
@ -40,5 +49,5 @@
|
|||
\setcounter{section@level}{4}
|
||||
\setcounter{Item}{0}
|
||||
\setcounter{Hfootnote}{0}
|
||||
\setcounter{bookmark@seq@number}{5}
|
||||
\setcounter{bookmark@seq@number}{9}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
\chapter{Conclusion}
|
||||
\paragraph{}
|
||||
We have demonstrated that feature extraction through deep learning is a powerful tool for enhancing neural network performance. This is particularly evident when addressing larger-scale problems, such as the ImageNet1000 dataset, which contains one thousand different classification labels. Instead of training a network to process entire images across all labels, feature extraction allows for a more efficient approach, where the network is trained on extracted features rather than raw data. With a sufficiently large and reasonably designed deep learning network, problems like ImageNet can be effectively tackled, where traditional feed-forward networks would likely struggle.
|
||||
\paragraph{}
|
||||
However, this work has clear limitations. We only conducted two experiments and did not control for the number of parameters between the feed-forward network and the feature extraction step. Future work could address this by testing feed-forward networks with more and larger layers, thereby compensating for the parameter discrepancy between the two configurations. Despite these limitations, we believe this paper has successfully shown that feature extraction in deep learning is a valuable and powerful tool.
|
||||
|
||||
\bibliographystyle{plain}
|
||||
\bibliography{refs}
|
||||
\nocite{dlib09}
|
|
@ -0,0 +1,58 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction}{1}{chapter.1}\protected@file@percent }
|
||||
\@writefile{lof}{\addvspace {10\p@ }}
|
||||
\@writefile{lot}{\addvspace {10\p@ }}
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{1}{section*.4}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{1}{section*.5}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{1}{section*.6}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {chapter}{\numberline {2}Experimental Setup}{2}{chapter.2}\protected@file@percent }
|
||||
\@writefile{lof}{\addvspace {10\p@ }}
|
||||
\@writefile{lot}{\addvspace {10\p@ }}
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.7}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.8}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Experiment 2}{2}{section.2.2}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.9}\protected@file@percent }
|
||||
\@setckpt{chapters/introduction_and_motivation}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
\setcounter{enumiii}{0}
|
||||
\setcounter{enumiv}{0}
|
||||
\setcounter{footnote}{0}
|
||||
\setcounter{mpfootnote}{0}
|
||||
\setcounter{part}{0}
|
||||
\setcounter{chapter}{2}
|
||||
\setcounter{section}{2}
|
||||
\setcounter{subsection}{0}
|
||||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
\setcounter{subparagraph}{0}
|
||||
\setcounter{figure}{0}
|
||||
\setcounter{table}{0}
|
||||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
\setcounter{float@type}{8}
|
||||
\setcounter{algorithm}{0}
|
||||
\setcounter{ALG@line}{0}
|
||||
\setcounter{ALG@rem}{0}
|
||||
\setcounter{ALG@nested}{0}
|
||||
\setcounter{ALG@Lnr}{2}
|
||||
\setcounter{ALG@blocknr}{10}
|
||||
\setcounter{ALG@storecount}{0}
|
||||
\setcounter{ALG@tmpcounter}{0}
|
||||
\setcounter{LT@tables}{0}
|
||||
\setcounter{LT@chunks}{0}
|
||||
\setcounter{section@level}{4}
|
||||
\setcounter{Item}{0}
|
||||
\setcounter{Hfootnote}{0}
|
||||
\setcounter{bookmark@seq@number}{5}
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
\chapter{Introduction}
|
||||
\paragraph{}As previously mentioned, deep learning combines feature extraction through convolution and pooling with traditional neural networks, eliminating the need for humans to manually extract features from datasets. Convolution, in essence, is a filtering process in which trained filters slide over the input data to extract features and other useful information. Pooling is the subsequent process of taking local samples and selecting either the minimum, maximum, or average of those samples. This step helps identify feature locations and condenses the information produced by the convolution layer.
|
||||
|
||||
\paragraph{}A typical deep learning pipeline consists of several convolution and pooling layers, followed by a few fully connected layers. In this work, we aim to demonstrate that using a deep learning configuration can reduce the size of the feed-forward section without compromising program performance, thereby highlighting the effectiveness of deep learning.
|
||||
|
||||
\paragraph{}The MNIST database is a standard benchmark for image-processing neural networks. For our comparison, we will use a modified version of the DLIB deep learning example. This approach allows us to showcase the differences between standard feed-forward neural networks and deep learning networks without requiring expensive GPUs or AI accelerators. While the MNIST dataset is solvable using feed-forward neural networks, we intend to demonstrate that deep learning can achieve better classification performance, even on smaller networks.
|
||||
|
||||
\chapter{Experimental Setup}
|
||||
\paragraph{}
|
||||
Our experiments are divided into two parts, each testing a deep learning network alongside its corresponding feed-forward network. For a fair comparison, the feed-forward test focuses explicitly on the feed-forward component of the deep learning network. This ensures that variables such as the number of layers or nodes in the feed-forward section remain consistent, minimizing potential biases and maintaining the integrity of our comparisons.
|
||||
|
||||
\section{Experiment 1}
|
||||
\paragraph{}
|
||||
Our first experiment compares using the included example from the DLIB C++ library. Specifically the deep learning test consists of
|
||||
|
||||
\section{Experiment 2}
|
||||
\paragraph{}
|
|
@ -11,12 +11,22 @@
|
|||
\@writefile{lof}{\addvspace {10\p@ }}
|
||||
\@writefile{lot}{\addvspace {10\p@ }}
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.7}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.8}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Experiment 2}{2}{section.2.2}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{2}{section*.9}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{3}{section*.10}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{3}{section*.11}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{3}{section*.12}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {2.1}{\ignorespaces Experiment 1 network configuration.\relax }}{3}{figure.caption.13}\protected@file@percent }
|
||||
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
|
||||
\newlabel{fig:screenshot003}{{2.1}{3}{Experiment 1 network configuration.\relax }{figure.caption.13}{}}
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Experiment 2}{3}{section.2.2}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{3}{section*.14}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{4}{section*.15}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {2.2}{\ignorespaces Experiment 2 network configuration.\relax }}{4}{figure.caption.16}\protected@file@percent }
|
||||
\newlabel{fig:screenshot004}{{2.2}{4}{Experiment 2 network configuration.\relax }{figure.caption.16}{}}
|
||||
\@setckpt{chapters/introduction_and_motivation}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{page}{5}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
|
@ -31,12 +41,12 @@
|
|||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
\setcounter{subparagraph}{0}
|
||||
\setcounter{figure}{0}
|
||||
\setcounter{figure}{2}
|
||||
\setcounter{table}{0}
|
||||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{caption@flags}{2}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
|
|
|
@ -1,17 +1,48 @@
|
|||
\chapter{Introduction}
|
||||
\paragraph{}As previously mentioned, deep learning combines feature extraction through convolution and pooling with traditional neural networks, eliminating the need for humans to manually extract features from datasets. Convolution, in essence, is a filtering process in which trained filters slide over the input data to extract features and other useful information. Pooling is the subsequent process of taking local samples and selecting either the minimum, maximum, or average of those samples. This step helps identify feature locations and condenses the information produced by the convolution layer.
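As a compact sketch of these two operations (using our own notation rather than anything taken from the DLIB example), a convolution output over an image $I$ with a trained filter $K$, and a $2\times2$ max-pooling output over an activation map $A$, can be written as
\[
(I * K)(i, j) = \sum_{m}\sum_{n} I(i + m,\, j + n)\, K(m, n),
\qquad
P(i, j) = \max_{0 \le m, n < 2} A(2i + m,\, 2j + n).
\]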
|
||||
|
||||
\paragraph{}A typical deep learning pipeline consists of several convolution and pooling layers, followed by a few fully connected layers. In this work, we aim to demonstrate that using a deep learning configuration can reduce the size of the feed-forward section without compromising program performance, thereby highlighting the effectiveness of deep learning.
|
||||
\paragraph{}A typical deep learning pipeline consists of several convolution and pooling layers, followed by a few fully connected layers. In this work, we aim to demonstrate that using a deep learning network configuration can reduce the size of the feed-forward section without compromising classification performance, thereby highlighting the effectiveness of deep learning.
|
||||
|
||||
\paragraph{}The MNIST database is a standard benchmark for image-processing neural networks. For our comparison, we will use a modified version of the DLIB deep learning example. This approach allows us to showcase the differences between standard feed-forward neural networks and deep learning networks without requiring expensive GPUs or AI accelerators. While the MNIST dataset is solvable using feed-forward neural networks, we intend to demonstrate that deep learning can achieve better classification performance, even on smaller networks.
|
||||
\paragraph{}The MNIST database is a standard benchmark for image-processing neural networks. For our comparison, we will use a modified version of the DLIB deep learning example. This approach allows us to showcase the differences between standard feed-forward neural networks and deep learning networks without requiring expensive GPUs or AI accelerators. While the MNIST dataset is solvable using only feed-forward neural networks, we intend to demonstrate that feature extraction with deep learning can achieve better prediction accuracy, even with smaller classification networks.
|
||||
|
||||
\chapter{Experimental Setup}
|
||||
\paragraph{}
|
||||
Our experiments are divided into two parts, each testing a deep learning network alongside its corresponding feed-forward network. For a fair comparison, the feed-forward test focuses explicitly on the feed-forward component of the deep learning network. This ensures that variables such as the number of layers or nodes in the feed-forward section remain consistent, minimizing potential biases and maintaining the integrity of our comparisons.
|
||||
The MNIST database comprises grayscale images of size 28x28 pixels, with 60,000 training images and 10,000 test images. For each experiment, we present graphs comparing the average error per epoch for both configurations, alongside a table summarizing the test results of the final network after training. Due to resource constraints, training is limited to a maximum of 100 epochs, and the experiments are averaged over ten runs. Notably, the deep learning configuration requires approximately six hours to complete on a 32-thread workstation.
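A minimal sketch of how a single run of this setup can be driven with DLIB is shown below. This is our reconstruction rather than the project's actual source; apart from the 100-epoch cap stated above, the learning-rate, mini-batch, and dataset-folder values are assumptions, and net_type stands for whichever network alias from Experiment 1 is under test.

#include <dlib/dnn.h>
#include <dlib/data_io.h>
#include <iostream>
#include <vector>

using namespace dlib;

int main()
{
    // MNIST: 60,000 28x28 grayscale training images and 10,000 test images.
    std::vector<matrix<unsigned char>> training_images, testing_images;
    std::vector<unsigned long> training_labels, testing_labels;
    load_mnist_dataset("mnist_data", training_images, training_labels,
                       testing_images, testing_labels);

    net_type net;                            // deep or feed-forward-only alias (Experiment 1)
    dnn_trainer<net_type> trainer(net);
    trainer.set_learning_rate(0.01);         // assumed hyper-parameters
    trainer.set_min_learning_rate(0.00001);
    trainer.set_mini_batch_size(128);
    trainer.set_max_num_epochs(100);         // the 100-epoch limit described above
    trainer.be_verbose();
    trainer.train(training_images, training_labels);

    // Score the trained network on the 10,000-image test set; the averages
    // reported later come from repeating this whole run ten times.
    const std::vector<unsigned long> predicted = net(testing_images);
    std::size_t correct = 0;
    for (std::size_t i = 0; i < testing_images.size(); ++i)
        if (predicted[i] == testing_labels[i])
            ++correct;
    std::cout << correct << " / " << testing_images.size() << " correct\n";
    return 0;
}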
|
||||
|
||||
\paragraph{}
|
||||
Our experiments are divided into two phases, each testing a deep learning network alongside its corresponding feed-forward network. To ensure a fair comparison, the feed-forward test focuses exclusively on the feed-forward component of the deep learning network. This approach ensures consistency in variables such as the number of layers or nodes in the feed-forward section, minimizing potential biases and preserving the integrity of the results.
|
||||
|
||||
\section{Experiment 1}
|
||||
\paragraph{}
|
||||
Our first experiment compares using the included example from the DLIB C++ library. Specifically the deep learning test consists of
|
||||
The first experiment compares the performance of a deep learning configuration to a baseline feed-forward network, using the example provided in the DLIB C++ library. The deep learning configuration consists of two ReLU-activated convolutional layers, each followed by max-pooling, and then a fully connected feed-forward network.
|
||||
|
||||
\paragraph{}
|
||||
The first convolutional layer uses six filters, each sized 5x5, with a stride of 1x1. The second convolutional layer applies sixteen filters with the same size and stride configuration. After each convolutional operation, the output is passed through a max-pooling layer with a filter size of 2x2 and a stride of 2x2, which reduces the spatial dimensions.
|
||||
|
||||
\paragraph{}
|
||||
The pooled features are then fed into a three-layer fully connected ReLU-activated feed-forward network. The fully connected layers consist of 120 neurons in the first layer, 84 neurons in the second, and 10 neurons in the final output layer, with each output representing a predicted class for the input image.
|
||||
|
||||
\paragraph{}
|
||||
For comparison, the baseline configuration omits the convolutional and pooling layers and consists solely of the three-layer feed-forward network. This setup isolates the feed-forward network’s performance, enabling a direct comparison with the deep learning configuration.
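Expressed in DLIB's layer-template syntax, the two configurations described above might look like the following sketch; it mirrors DLIB's own dnn_introduction example and is our reconstruction rather than the project's actual source.

#include <dlib/dnn.h>

using namespace dlib;

// Deep learning configuration: 5x5 convolutions with 6 and then 16 filters
// (stride 1x1), each followed by ReLU and 2x2 max-pooling (stride 2x2),
// feeding the 120-84-10 fully connected classifier.
using deep_net_type = loss_multiclass_log<
    fc<10,
    relu<fc<84,
    relu<fc<120,
    max_pool<2, 2, 2, 2, relu<con<16, 5, 5, 1, 1,
    max_pool<2, 2, 2, 2, relu<con<6, 5, 5, 1, 1,
    input<matrix<unsigned char>>
    >>>>>>>>>>>>;

// Baseline configuration: the same 120-84-10 classifier applied directly
// to the raw 28x28 pixels, with no convolution or pooling.
using ff_net_type = loss_multiclass_log<
    fc<10,
    relu<fc<84,
    relu<fc<120,
    input<matrix<unsigned char>>
    >>>>>>;

Either alias can stand in for net_type in the training sketch given earlier.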
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{screenshot003}
|
||||
\caption{Experiment 1 network configuration.}
|
||||
\label{fig:screenshot003}
|
||||
\end{figure}
|
||||
|
||||
|
||||
\section{Experiment 2}
|
||||
\paragraph{}
|
||||
\paragraph{}
|
||||
The second experiment retains the same convolutional and pooling configurations as in Experiment 1 but modifies the number of neurons in the feed-forward section of the network. This adjustment reduces the number of parameters available for object detection and classification, allowing for an evaluation of how deep learning's hierarchical feature extraction compensates for reduced network capacity.
|
||||
|
||||
\paragraph{}
|
||||
By demonstrating the impact of parameter constraints, this experiment highlights the advantage of feature extraction in the deep learning configuration, particularly when resources in the feed-forward section are limited. This serves to underscore the practical utility of convolutional layers in achieving robust classification performance with smaller, more efficient networks.
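As a sketch of the kind of change this implies (the exact reduced neuron counts are not stated here, so the widths below are purely hypothetical placeholders), only the fully connected widths shrink while the convolutional front end from Experiment 1 is kept:

// Hypothetical reduced classifier for Experiment 2: the convolution/pooling
// stack is unchanged; 32 and 16 are placeholder widths, not the real values.
using small_deep_net_type = loss_multiclass_log<
    fc<10,
    relu<fc<16,
    relu<fc<32,
    max_pool<2, 2, 2, 2, relu<con<16, 5, 5, 1, 1,
    max_pool<2, 2, 2, 2, relu<con<6, 5, 5, 1, 1,
    input<matrix<unsigned char>>
    >>>>>>>>>>>>;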
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{screenshot004}
|
||||
\caption{Experiment 2 network configuration.}
|
||||
\label{fig:screenshot004}
|
||||
\end{figure}
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,44 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\@setckpt{chapters/results}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
\setcounter{enumiii}{0}
|
||||
\setcounter{enumiv}{0}
|
||||
\setcounter{footnote}{0}
|
||||
\setcounter{mpfootnote}{0}
|
||||
\setcounter{part}{0}
|
||||
\setcounter{chapter}{2}
|
||||
\setcounter{section}{2}
|
||||
\setcounter{subsection}{0}
|
||||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
\setcounter{subparagraph}{0}
|
||||
\setcounter{figure}{0}
|
||||
\setcounter{table}{0}
|
||||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
\setcounter{float@type}{8}
|
||||
\setcounter{algorithm}{0}
|
||||
\setcounter{ALG@line}{0}
|
||||
\setcounter{ALG@rem}{0}
|
||||
\setcounter{ALG@nested}{0}
|
||||
\setcounter{ALG@Lnr}{2}
|
||||
\setcounter{ALG@blocknr}{10}
|
||||
\setcounter{ALG@storecount}{0}
|
||||
\setcounter{ALG@tmpcounter}{0}
|
||||
\setcounter{LT@tables}{0}
|
||||
\setcounter{LT@chunks}{0}
|
||||
\setcounter{section@level}{4}
|
||||
\setcounter{Item}{0}
|
||||
\setcounter{Hfootnote}{0}
|
||||
\setcounter{bookmark@seq@number}{5}
|
||||
}
|
|
@ -1,8 +1,26 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\@writefile{toc}{\contentsline {chapter}{\numberline {3}Results}{5}{chapter.3}\protected@file@percent }
|
||||
\@writefile{lof}{\addvspace {10\p@ }}
|
||||
\@writefile{lot}{\addvspace {10\p@ }}
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {3.1}Experiment 1}{5}{section.3.1}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{5}{section*.17}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{5}{section*.18}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{5}{section*.19}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {3.1}{\ignorespaces Experiment 1 Feed-Forward vs Deep Learning, average loss while training.\relax }}{6}{figure.caption.20}\protected@file@percent }
|
||||
\newlabel{fig:ex1loss}{{3.1}{6}{Experiment 1 Feed-Forward vs Deep Learning, average loss while training.\relax }{figure.caption.20}{}}
|
||||
\@writefile{lot}{\contentsline {table}{\numberline {3.1}{\ignorespaces Experiment 1 results on testing dataset. (Averaged over 10 runs)\relax }}{6}{table.caption.21}\protected@file@percent }
|
||||
\newlabel{tbl:ex1}{{3.1}{6}{Experiment 1 results on testing dataset. (Averaged over 10 runs)\relax }{table.caption.21}{}}
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {3.2}Experiment 2}{7}{section.3.2}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{7}{section*.22}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {paragraph}{}{7}{section*.23}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {3.2}{\ignorespaces Experiment 2 Feed-Forward vs Deep Learning, average loss while training.\relax }}{8}{figure.caption.24}\protected@file@percent }
|
||||
\newlabel{fig:ex2loss}{{3.2}{8}{Experiment 2 Feed-Forward vs Deep Learning, average loss while training.\relax }{figure.caption.24}{}}
|
||||
\@writefile{lot}{\contentsline {table}{\numberline {3.2}{\ignorespaces Experiment 2 results on testing dataset. (Averaged over 10 runs)\relax }}{8}{table.caption.25}\protected@file@percent }
|
||||
\newlabel{tbl:ex2}{{3.2}{8}{Experiment 2 results on testing dataset. (Averaged over 10 runs)\relax }{table.caption.25}{}}
|
||||
\@setckpt{chapters/results}{
|
||||
\setcounter{page}{3}
|
||||
\setcounter{page}{9}
|
||||
\setcounter{equation}{0}
|
||||
\setcounter{enumi}{0}
|
||||
\setcounter{enumii}{0}
|
||||
|
@ -11,18 +29,18 @@
|
|||
\setcounter{footnote}{0}
|
||||
\setcounter{mpfootnote}{0}
|
||||
\setcounter{part}{0}
|
||||
\setcounter{chapter}{2}
|
||||
\setcounter{chapter}{3}
|
||||
\setcounter{section}{2}
|
||||
\setcounter{subsection}{0}
|
||||
\setcounter{subsubsection}{0}
|
||||
\setcounter{paragraph}{0}
|
||||
\setcounter{subparagraph}{0}
|
||||
\setcounter{figure}{0}
|
||||
\setcounter{table}{0}
|
||||
\setcounter{figure}{2}
|
||||
\setcounter{table}{2}
|
||||
\setcounter{parentequation}{0}
|
||||
\setcounter{svg@param@lastpage}{0}
|
||||
\setcounter{svg@param@currpage}{-1}
|
||||
\setcounter{caption@flags}{0}
|
||||
\setcounter{caption@flags}{2}
|
||||
\setcounter{continuedfloat}{0}
|
||||
\setcounter{subfigure}{0}
|
||||
\setcounter{subtable}{0}
|
||||
|
@ -40,5 +58,5 @@
|
|||
\setcounter{section@level}{4}
|
||||
\setcounter{Item}{0}
|
||||
\setcounter{Hfootnote}{0}
|
||||
\setcounter{bookmark@seq@number}{5}
|
||||
\setcounter{bookmark@seq@number}{8}
|
||||
}
|
||||
|
|
|
@ -1 +1,63 @@
|
|||
|
||||
\chapter{Results}
|
||||
\section{Experiment 1}
|
||||
\paragraph{}
|
||||
Our testing results, presented in Table \ref{tbl:ex1}, demonstrate that both classical feed-forward networks and deep learning configurations are capable of achieving near-perfect classification on the testing dataset with a reasonably sized neural network. The deep learning network achieved exceptional performance, converging in only 20 epochs with a final classification accuracy of 99\%. In comparison, the feed-forward network reached an accuracy of 98\% but required nearly all the allotted epochs to reach this level of performance. While it is possible that the feed-forward network could match the deep learning network's performance with additional training epochs, resource constraints precluded this, and such an extension was unnecessary for the purposes of Experiment 1.
|
||||
\paragraph{}
|
||||
These findings suggest the presence of an underlying pattern within the Arabic numeral system that can be effectively learned by both approaches. Furthermore, as illustrated in Figure \ref{fig:ex1loss}, the deep learning configuration demonstrated substantially faster convergence, underscoring the efficiency of convolutional layers in feature extraction and their role in expediting convergence in the training process.
|
||||
\paragraph{}
|
||||
It is important to note that throughout this experiment, the deep learning component accounted for the majority of the runtime. This is acceptable, as our primary goal is to demonstrate the benefit of feature extraction on classification performance, rather than to assess computational efficiency. While deep learning may be excessive for a relatively simple problem like this, it becomes a more viable option for more complex tasks. In such cases, feature extraction could offer a significant advantage, as the large size of a feed-forward-only network may become impractical.
|
||||
|
||||
|
||||
\begin{center}
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{screenshot001}
|
||||
\caption{Experiment 1 Feed-Forward vs Deep Learning, average loss while training.}
|
||||
\label{fig:ex1loss}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\begin{tabular}{|c|c|c|c|}
|
||||
\hline
|
||||
Test & Correct & Incorrect & Accuracy (\%) \\
|
||||
\hline
|
||||
Feed-Forward & 9800 & 199 & 98\\
|
||||
\hline
|
||||
Deep Learning & 9898 & 101 & 99\\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Experiment 1 results on testing dataset. (Averaged over 10 runs)}
|
||||
\label{tbl:ex1}
|
||||
\end{table}
|
||||
|
||||
\section{Experiment 2}
|
||||
\paragraph{}
|
||||
In Experiment 2, the benefits of deep learning are evident, as shown in Table \ref{tbl:ex2}. Despite the significantly reduced size of the classification network, the deep learning configuration maintains the 99\% accuracy achieved in Experiment 1. In contrast, the feed-forward network's performance declines, now achieving only 96\% accuracy. Notably, the deep learning network still converges within 20 epochs, similar to Experiment 1. While the feed-forward network converges within roughly 40 epochs and continues to improve throughout the remainder of the run, it never reaches the peak classification accuracy observed in Experiment 1.
|
||||
\paragraph{}
|
||||
It could certainly be argued that the reduced size of the feed-forward network is the primary cause of the observed decrease in performance. However, this only serves to highlight the value of feature extraction. Feature extraction provides more information-dense data to the classification network and, as clearly demonstrated in this experiment, significantly enhances overall performance. While allowing the feed-forward network more training time might help improve its results, the benefit of feature extraction in the deep learning configuration remains evident.
|
||||
|
||||
\begin{center}
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{screenshot002}
|
||||
\caption{Experiment 2 Feed-Forward vs Deep Learning, average loss while training.}
|
||||
\label{fig:ex2loss}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\begin{tabular}{|c|c|c|c|}
|
||||
\hline
|
||||
Test & Correct & Incorrect & Accuracy (\%) \\
|
||||
\hline
|
||||
Feed-Forward & 9588 & 411 & 96\\
|
||||
\hline
|
||||
Deep Learning & 9887 & 112 & 99\\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Experiment 2 results on testing dataset. (Averaged over 10 runs)}
|
||||
\label{tbl:ex2}
|
||||
\end{table}
|
|
@ -0,0 +1,8 @@
|
|||
@Article{dlib09,
|
||||
author = {Davis E. King},
|
||||
title = {Dlib-ml: A Machine Learning Toolkit},
|
||||
journal = {Journal of Machine Learning Research},
|
||||
year = {2009},
|
||||
volume = {10},
|
||||
pages = {1755-1758},
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
\relax
|
||||
\providecommand{\transparent@use}[1]{}
|
||||
\AC@reset@newl@bel
|
||||
\providecommand\hyper@newdestlabel[2]{}
|
||||
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
|
||||
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
|
||||
\global\let\oldnewlabel\newlabel
|
||||
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
|
||||
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
|
||||
\AtEndDocument{\ifx\hyper@anchor\@undefined
|
||||
\let\newlabel\oldnewlabel
|
||||
\fi}
|
||||
\fi}
|
||||
\global\let\hyper@last\relax
|
||||
\gdef\HyperFirstAtBeginDocument#1{#1}
|
||||
\providecommand\HyField@AuxAddToFields[1]{}
|
||||
\providecommand\HyField@AuxAddToCoFields[2]{}
|
||||
\@input{chapters/abstract.aux}
|
||||
\@writefile{toc}{\contentsline {chapter}{Table of Contents}{II}{section*.2}\protected@file@percent }
|
||||
\@input{chapters/introduction_and_motivation.aux}
|
||||
\@input{chapters/results.aux}
|
||||
\@input{chapters/conclusion.aux}
|
||||
\gdef\svg@ink@ver@settings{{\m@ne }{inkscape}{\m@ne }}
|
||||
\gdef \@abspage@last{5}
|
File diff suppressed because it is too large
|
@ -0,0 +1,5 @@
|
|||
\BOOKMARK [0][-]{section*.2}{\376\377\000T\000a\000b\000l\000e\000\040\000o\000f\000\040\000C\000o\000n\000t\000e\000n\000t\000s}{}% 1
|
||||
\BOOKMARK [0][-]{chapter.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 2
|
||||
\BOOKMARK [0][-]{chapter.2}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000a\000l\000\040\000S\000e\000t\000u\000p}{}% 3
|
||||
\BOOKMARK [1][-]{section.2.1}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0001}{chapter.2}% 4
|
||||
\BOOKMARK [1][-]{section.2.2}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0002}{chapter.2}% 5
|
Binary file not shown.
|
@ -0,0 +1,11 @@
|
|||
\contentsline {chapter}{Table of Contents}{II}{section*.2}%
|
||||
\contentsline {chapter}{\numberline {1}Introduction}{1}{chapter.1}%
|
||||
\contentsline {paragraph}{}{1}{section*.4}%
|
||||
\contentsline {paragraph}{}{1}{section*.5}%
|
||||
\contentsline {paragraph}{}{1}{section*.6}%
|
||||
\contentsline {chapter}{\numberline {2}Experimental Setup}{2}{chapter.2}%
|
||||
\contentsline {paragraph}{}{2}{section*.7}%
|
||||
\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}%
|
||||
\contentsline {paragraph}{}{2}{section*.8}%
|
||||
\contentsline {section}{\numberline {2.2}Experiment 2}{2}{section.2.2}%
|
||||
\contentsline {paragraph}{}{2}{section*.9}%
|
|
@ -21,4 +21,4 @@
|
|||
\@input{chapters/results.aux}
|
||||
\@input{chapters/conclusion.aux}
|
||||
\gdef\svg@ink@ver@settings{{\m@ne }{inkscape}{\m@ne }}
|
||||
\gdef \@abspage@last{5}
|
||||
\gdef \@abspage@last{13}
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
\begin{thebibliography}{1}
|
||||
|
||||
\bibitem{dlib09}
|
||||
Davis~E. King.
|
||||
\newblock Dlib-ml: A machine learning toolkit.
|
||||
\newblock {\em Journal of Machine Learning Research}, 10:1755--1758, 2009.
|
||||
|
||||
\end{thebibliography}
|
|
@ -0,0 +1,50 @@
|
|||
This is BibTeX, Version 0.99d (TeX Live 2022/Debian)
|
||||
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
|
||||
The top-level auxiliary file: report.aux
|
||||
A level-1 auxiliary file: chapters/abstract.aux
|
||||
A level-1 auxiliary file: chapters/introduction_and_motivation.aux
|
||||
A level-1 auxiliary file: chapters/results.aux
|
||||
A level-1 auxiliary file: chapters/conclusion.aux
|
||||
The style file: plain.bst
|
||||
Database file #1: refs.bib
|
||||
You've used 1 entry,
|
||||
2118 wiz_defined-function locations,
|
||||
508 strings with 4198 characters,
|
||||
and the built_in function-call counts, 320 in all, are:
|
||||
= -- 31
|
||||
> -- 7
|
||||
< -- 0
|
||||
+ -- 3
|
||||
- -- 2
|
||||
* -- 22
|
||||
:= -- 59
|
||||
add.period$ -- 3
|
||||
call.type$ -- 1
|
||||
change.case$ -- 4
|
||||
chr.to.int$ -- 0
|
||||
cite$ -- 1
|
||||
duplicate$ -- 11
|
||||
empty$ -- 29
|
||||
format.name$ -- 2
|
||||
if$ -- 65
|
||||
int.to.chr$ -- 0
|
||||
int.to.str$ -- 1
|
||||
missing$ -- 1
|
||||
newline$ -- 8
|
||||
num.names$ -- 2
|
||||
pop$ -- 2
|
||||
preamble$ -- 1
|
||||
purify$ -- 3
|
||||
quote$ -- 0
|
||||
skip$ -- 7
|
||||
stack$ -- 0
|
||||
substring$ -- 32
|
||||
swap$ -- 1
|
||||
text.length$ -- 0
|
||||
text.prefix$ -- 0
|
||||
top$ -- 0
|
||||
type$ -- 4
|
||||
warning$ -- 0
|
||||
while$ -- 3
|
||||
width$ -- 2
|
||||
write$ -- 13
|
latex/report.log
|
@ -1,4 +1,4 @@
|
|||
This is pdfTeX, Version 3.141592653-2.6-1.40.24 (TeX Live 2022/Debian) (preloaded format=pdflatex 2023.10.9) 9 JAN 2025 15:06
|
||||
This is pdfTeX, Version 3.141592653-2.6-1.40.24 (TeX Live 2022/Debian) (preloaded format=pdflatex 2023.10.9) 10 JAN 2025 14:53
|
||||
entering extended mode
|
||||
restricted \write18 enabled.
|
||||
%&-line parsing enabled.
|
||||
|
@ -806,7 +806,7 @@ Package hyperref Info: Link coloring OFF on input line 57.
(/usr/share/texlive/texmf-dist/tex/latex/arabtex/alocal.sty
(ArabTeX) 3.11 local stub, 26.02.2006)
(ArabTeX) version 3.11s (02.07.2006): second phase of patching)
<assets/brock.jpg, id=27, 1053.9375pt x 638.385pt>
<assets/brock.jpg, id=43, 1053.9375pt x 638.385pt>
File: assets/brock.jpg Graphic file (type jpg)
<use assets/brock.jpg>
Package pdftex.def Info: assets/brock.jpg used on input line 67.
@ -836,12 +836,12 @@ l.94 \include{chapters/abstract}
] (./report.toc
LaTeX Font Info: Trying to load font information for U+msa on input line 8.
LaTeX Font Info: Trying to load font information for U+msa on input line 9.

(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsa.fd
File: umsa.fd 2013/01/14 v3.01 AMS symbols A
)
LaTeX Font Info: Trying to load font information for U+msb on input line 8.
LaTeX Font Info: Trying to load font information for U+msb on input line 9.

(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsb.fd
File: umsb.fd 2013/01/14 v3.01 AMS symbols B
@ -872,15 +872,57 @@ ure ex-trac-tion through
]
Chapter 2.
) [2
[2

]
<screenshot003.png, id=95, 1055.44313pt x 368.1253pt>
File: screenshot003.png Graphic file (type png)
<use screenshot003.png>
Package pdftex.def Info: screenshot003.png used on input line 30.
(pdftex.def) Requested size: 390.0pt x 136.02448pt.
[3 <./screenshot003.png (PNG copy)>]
<screenshot004.png, id=105, 1023.825pt x 358.33875pt>
File: screenshot004.png Graphic file (type png)
<use screenshot004.png>
Package pdftex.def Info: screenshot004.png used on input line 45.
(pdftex.def) Requested size: 390.0pt x 136.4982pt.
) [4 <./screenshot004.png (PNG copy)>]
\openout2 = `chapters/results.aux'.

(./chapters/results.tex)

(./chapters/results.tex
Chapter 3.
[5

]
<screenshot001.png, id=124, 462.528pt x 346.896pt>
File: screenshot001.png Graphic file (type png)
<use screenshot001.png>
Package pdftex.def Info: screenshot001.png used on input line 14.
(pdftex.def) Requested size: 390.0pt x 292.5128pt.
[6 <./screenshot001.png>]
<screenshot002.png, id=132, 462.528pt x 346.896pt>
File: screenshot002.png Graphic file (type png)
<use screenshot002.png>
Package pdftex.def Info: screenshot002.png used on input line 44.
(pdftex.def) Requested size: 390.0pt x 292.5128pt.
[7]) [8 <./screenshot002.png>]
\openout2 = `chapters/conclusion.aux'.

(./chapters/conclusion.tex)

(./chapters/conclusion.tex
Chapter 4.
(./report.bbl [9

])) [10

]

Package caption Warning: Unused \captionsetup[sub] on input line 25.
See the caption package documentation for explanation.
@ -1026,52 +1068,44 @@ chapters/abstract.tex
umsa.fd 2013/01/14 v3.01 AMS symbols A
umsb.fd 2013/01/14 v3.01 AMS symbols B
chapters/introduction_and_motivation.tex
screenshot003.png Graphic file (type png)
screenshot004.png Graphic file (type png)
chapters/results.tex
screenshot001.png Graphic file (type png)
screenshot002.png Graphic file (type png)
chapters/conclusion.tex
report.bbl
***********

LaTeX Font Warning: Size substitutions with differences
(Font) up to 0.72pt have occurred.

Package rerunfilecheck Warning: File `report.out' has changed.
(rerunfilecheck) Rerun to get outlines right
(rerunfilecheck) or use package `bookmark'.

Package rerunfilecheck Info: Checksums for `report.out':
(rerunfilecheck) Before: CAA4FAEE2960169512419C2B4C6CA732;625
(rerunfilecheck) After: 7D3E0977AEBCA33FB26B3D757A6C75B2;608.
Package rerunfilecheck Info: File `report.out' has not changed.
(rerunfilecheck) Checksum: C72A7A48928A7D1700C562F4A4A7FB41;1013.
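The rerunfilecheck lines above appear to pair the old run's warning (report.out changed, so another pass was needed for the outlines) with the new run's confirmation that report.out is now stable. As the warning itself suggests, loading the bookmark package writes PDF outlines in a single pass; a minimal preamble sketch, assumed rather than taken from report.tex (which is not shown in this diff):

    \usepackage{hyperref}
    \usepackage{bookmark}   % load after hyperref to avoid the extra outline rerun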
)
(\end occurred inside a group at level 1)

### semi simple group (level 1) entered at line 57 (\begingroup)
### bottom level
Here is how much of TeX's memory you used:
19569 strings out of 476091
322656 string characters out of 5794081
1855330 words of memory out of 5000000
39500 multiletter control sequences out of 15000+600000
19668 strings out of 476091
324234 string characters out of 5794081
1856330 words of memory out of 5000000
39565 multiletter control sequences out of 15000+600000
523015 words of font info for 58 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
94i,5n,92p,1010b,2028s stack positions out of 10000i,1000n,20000p,200000b,200000s

pdfTeX warning (dest): name{subsection.2.0.2} has been referenced but does not
exist, replaced by a fixed one

pdfTeX warning (dest): name{subsection.2.0.1} has been referenced but does not
exist, replaced by a fixed one

{/usr/share/texmf/fonts/enc/dvips/cm-super/cm-super-t1.enc}</usr/share/texmf/fo
nts/type1/public/cm-super/sfbx1200.pfb></usr/share/texmf/fonts/type1/public/cm-
super/sfbx1728.pfb></usr/share/texmf/fonts/type1/public/cm-super/sfbx2488.pfb><
/usr/share/texmf/fonts/type1/public/cm-super/sfrm1200.pfb></usr/share/texmf/fon
ts/type1/public/cm-super/sfrm1440.pfb>
Output written on report.pdf (5 pages, 129391 bytes).
94i,8n,92p,1010b,2118s stack positions out of 10000i,1000n,20000p,200000b,200000s
{/usr/share/texmf/fonts/enc/dvips/cm-super/cm-super-t1.enc}</us
r/share/texmf/fonts/type1/public/cm-super/sfbx1200.pfb></usr/share/texmf/fonts/
type1/public/cm-super/sfbx1728.pfb></usr/share/texmf/fonts/type1/public/cm-supe
r/sfbx2488.pfb></usr/share/texmf/fonts/type1/public/cm-super/sfrm1200.pfb></usr
/share/texmf/fonts/type1/public/cm-super/sfrm1440.pfb></usr/share/texmf/fonts/t
ype1/public/cm-super/sfti1200.pfb>
Output written on report.pdf (13 pages, 304825 bytes).
PDF statistics:
103 PDF objects out of 1000 (max. 8388607)
84 compressed objects within 1 object stream
20 named destinations out of 1000 (max. 500000)
58 words of extra memory for PDF output out of 10000 (max. 10000000)
203 PDF objects out of 1000 (max. 8388607)
167 compressed objects within 2 object streams
50 named destinations out of 1000 (max. 500000)
110 words of extra memory for PDF output out of 10000 (max. 10000000)
@ -3,3 +3,7 @@
\BOOKMARK [0][-]{chapter.2}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000a\000l\000\040\000S\000e\000t\000u\000p}{}% 3
\BOOKMARK [1][-]{section.2.1}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0001}{chapter.2}% 4
\BOOKMARK [1][-]{section.2.2}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0002}{chapter.2}% 5
\BOOKMARK [0][-]{chapter.3}{\376\377\000R\000e\000s\000u\000l\000t\000s}{}% 6
\BOOKMARK [1][-]{section.3.1}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0001}{chapter.3}% 7
\BOOKMARK [1][-]{section.3.2}{\376\377\000E\000x\000p\000e\000r\000i\000m\000e\000n\000t\000\040\0002}{chapter.3}% 8
\BOOKMARK [0][-]{chapter.4}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{}% 9
BIN latex/report.pdf (binary file not shown)
@ -5,7 +5,23 @@
\contentsline {paragraph}{}{1}{section*.6}%
\contentsline {chapter}{\numberline {2}Experimental Setup}{2}{chapter.2}%
\contentsline {paragraph}{}{2}{section*.7}%
\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}%
\contentsline {paragraph}{}{2}{section*.8}%
\contentsline {section}{\numberline {2.2}Experiment 2}{2}{section.2.2}%
\contentsline {section}{\numberline {2.1}Experiment 1}{2}{section.2.1}%
\contentsline {paragraph}{}{2}{section*.9}%
\contentsline {paragraph}{}{3}{section*.10}%
\contentsline {paragraph}{}{3}{section*.11}%
\contentsline {paragraph}{}{3}{section*.12}%
\contentsline {section}{\numberline {2.2}Experiment 2}{3}{section.2.2}%
\contentsline {paragraph}{}{3}{section*.14}%
\contentsline {paragraph}{}{4}{section*.15}%
\contentsline {chapter}{\numberline {3}Results}{5}{chapter.3}%
\contentsline {section}{\numberline {3.1}Experiment 1}{5}{section.3.1}%
\contentsline {paragraph}{}{5}{section*.17}%
\contentsline {paragraph}{}{5}{section*.18}%
\contentsline {paragraph}{}{5}{section*.19}%
\contentsline {section}{\numberline {3.2}Experiment 2}{7}{section.3.2}%
\contentsline {paragraph}{}{7}{section*.22}%
\contentsline {paragraph}{}{7}{section*.23}%
\contentsline {chapter}{\numberline {4}Conclusion}{9}{chapter.4}%
\contentsline {paragraph}{}{9}{section*.26}%
\contentsline {paragraph}{}{9}{section*.27}%
Binary image files not shown (four added images: 31 KiB, 30 KiB, 42 KiB, 39 KiB).