diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..1ff0c42
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,63 @@
+###############################################################################
+# Set default behavior to automatically normalize line endings.
+###############################################################################
+* text=auto
+
+###############################################################################
+# Set default behavior for command prompt diff.
+#
+# This is needed for earlier builds of msysgit that do not have it on by
+# default for csharp files.
+# Note: This is only used by the command line.
+###############################################################################
+#*.cs diff=csharp
+
+###############################################################################
+# Set the merge driver for project and solution files
+#
+# Merging from the command prompt will add diff markers to the files if there
+# are conflicts (Merging from VS is not affected by the settings below, in VS
+# the diff markers are never inserted). Diff markers may cause the following
+# file extensions to fail to load in VS. An alternative would be to treat
+# these files as binary and thus will always conflict and require user
+# intervention with every merge. To do so, just uncomment the entries below
+###############################################################################
+#*.sln merge=binary
+#*.csproj merge=binary
+#*.vbproj merge=binary
+#*.vcxproj merge=binary
+#*.vcproj merge=binary
+#*.dbproj merge=binary
+#*.fsproj merge=binary
+#*.lsproj merge=binary
+#*.wixproj merge=binary
+#*.modelproj merge=binary
+#*.sqlproj merge=binary
+#*.wwaproj merge=binary
+
+###############################################################################
+# Set default behavior for image files
+#
+# Image files are treated as binary by default.
+###############################################################################
+#*.jpg binary
+#*.png binary
+#*.gif binary
+
+###############################################################################
+# diff behavior for common document formats
+#
+# Convert binary document formats to text before diffing them. This feature
+# is only available from the command line. Turn it on by uncommenting the
+# entries below.
+###############################################################################
+#*.doc diff=astextplain
+#*.DOC diff=astextplain
+#*.docx diff=astextplain
+#*.DOCX diff=astextplain
+#*.dot diff=astextplain
+#*.DOT diff=astextplain
+#*.pdf diff=astextplain
+#*.PDF diff=astextplain
+#*.rtf diff=astextplain
+#*.RTF diff=astextplain
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
deleted file mode 100644
index bdaab28..0000000
--- a/.github/workflows/python-publish.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# This workflow will upload a Python Package using Twine when a release is created
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
-
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-name: Upload Python Package
-
-on:
- release:
- types: [published]
-
-permissions:
- contents: read
-
-jobs:
- deploy:
-
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: '3.x'
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install build
- - name: Build package
- run: python -m build
- - name: Publish package
- uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
- with:
- user: __token__
- password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/.gitignore b/.gitignore
index f59af41..9491a2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,363 @@
-.venv/
-build/
-bin/
-temp/
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Ww][Ii][Nn]32/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Oo]ut/
+[Ll]og/
+[Ll]ogs/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# ASP.NET Scaffolding
+ScaffoldingReadMe.txt
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.meta
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Coverlet is a free, cross platform Code Coverage Tool
+coverage*.json
+coverage*.xml
+coverage*.info
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
__pycache__/
-*/*.egg-info/*
-dist/
-*.so
\ No newline at end of file
+*.pyc
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# Ionide (cross platform F# VS Code tools) working folder
+.ionide/
+
+# Fody - auto-generated XML schema
+FodyWeavers.xsd
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 0682eb2..0000000
--- a/.gitmodules
+++ /dev/null
@@ -1,6 +0,0 @@
-[submodule "tensor-array-repo/Tensor-Array"]
- path = tensor-array-repo/Tensor-Array
- url = https://github.com/Tensor-Array/Tensor-Array
-[submodule "third_party/pybind11"]
- path = third_party/pybind11
- url = https://github.com/pybind/pybind11
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
deleted file mode 100644
index d37e866..0000000
--- a/.vscode/c_cpp_properties.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "configurations": [
- {
- "name": "Linux",
- "includePath": [
- "${workspaceFolder}/**",
- "${workspaceFolder}/third_party/pybind11/include/**",
- "/usr/include/python3.10/**",
- "${workspaceFolder}/tensor-array-repo/Tensor-Array/src/**"
- ],
- "defines": [],
- "compilerPath": "/usr/bin/g++"
- }
- ],
- "version": 4
-}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index d61da3f..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "files.associations": {
- "initializer_list": "cpp",
- "array": "cpp",
- "atomic": "cpp",
- "bit": "cpp",
- "*.tcc": "cpp",
- "cctype": "cpp",
- "chrono": "cpp",
- "clocale": "cpp",
- "cmath": "cpp",
- "compare": "cpp",
- "complex": "cpp",
- "concepts": "cpp",
- "condition_variable": "cpp",
- "cstdarg": "cpp",
- "cstddef": "cpp",
- "cstdint": "cpp",
- "cstdio": "cpp",
- "cstdlib": "cpp",
- "cstring": "cpp",
- "ctime": "cpp",
- "cwchar": "cpp",
- "cwctype": "cpp",
- "deque": "cpp",
- "forward_list": "cpp",
- "string": "cpp",
- "unordered_map": "cpp",
- "unordered_set": "cpp",
- "vector": "cpp",
- "exception": "cpp",
- "algorithm": "cpp",
- "functional": "cpp",
- "iterator": "cpp",
- "memory": "cpp",
- "memory_resource": "cpp",
- "numeric": "cpp",
- "optional": "cpp",
- "random": "cpp",
- "ratio": "cpp",
- "string_view": "cpp",
- "system_error": "cpp",
- "tuple": "cpp",
- "type_traits": "cpp",
- "utility": "cpp",
- "iosfwd": "cpp",
- "istream": "cpp",
- "limits": "cpp",
- "mutex": "cpp",
- "new": "cpp",
- "numbers": "cpp",
- "ostream": "cpp",
- "semaphore": "cpp",
- "sstream": "cpp",
- "stdexcept": "cpp",
- "stop_token": "cpp",
- "streambuf": "cpp",
- "thread": "cpp",
- "cinttypes": "cpp",
- "typeindex": "cpp",
- "typeinfo": "cpp",
- "codecvt": "cpp",
- "iomanip": "cpp",
- "iostream": "cpp",
- "list": "cpp",
- "map": "cpp",
- "set": "cpp",
- "valarray": "cpp",
- "variant": "cpp"
- },
- "python.REPL.enableREPLSmartSend": false
-}
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
deleted file mode 100644
index b0377b4..0000000
--- a/.vscode/tasks.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "version": "2.0.0",
- "options": {
- "cwd": "${workspaceFolder}/build"
- },
- "tasks": [
- {
- "label": "cmake",
- "command": "cmake",
- "args": [
- ".."
- ]
- },
- {
- "label": "make",
- "command": "make"
- },
- {
- "label": "cmake build",
- "dependsOn": [
- "cmake",
- "make"
- ],
- "problemMatcher": [
- "$nvcc"
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index 0132681..0000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-cmake_minimum_required(VERSION 3.18.0)
-find_package(Python COMPONENTS Interpreter Development)
-if (${Python_FOUND})
-
-project(TensorArray_Python VERSION 0.1.0 LANGUAGES C CXX)
-
-include(CTest)
-enable_testing()
-
-add_subdirectory("third_party/pybind11" EXCLUDE_FROM_ALL)
-add_subdirectory("tensor-array-repo/Tensor-Array" EXCLUDE_FROM_ALL)
-pybind11_add_module(tensor2 SHARED src/tensor_array/_core/tensor_bind.cc)
-
-target_include_directories(
- tensor2
- PUBLIC "${CMAKE_CURRENT_LIST_DIR}/tensor-array-repo/Tensor-Array/src"
- INTERFACE ${Python_INCLUDE_DIRS}
-)
-
-SET_TARGET_PROPERTIES(tensor2 PROPERTIES PREFIX "")
-
-target_link_libraries(tensor2 PUBLIC TensorArray::Core)
-
-set(CPACK_PROJECT_NAME ${PROJECT_NAME})
-set(CPACK_PROJECT_VERSION ${PROJECT_VERSION})
-include(CPack)
-
-endif()
-
diff --git a/CPythonTensor.sln b/CPythonTensor.sln
new file mode 100644
index 0000000..1a115f2
--- /dev/null
+++ b/CPythonTensor.sln
@@ -0,0 +1,45 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.8.34316.72
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "CPythonTensor", "CPythonTensor\CPythonTensor.vcxproj", "{79ABB953-907D-469F-9723-42ED1915A4B0}"
+EndProject
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "PythonTensorTesting", "PythonTensorTesting\PythonTensorTesting.pyproj", "{99206FE6-CF46-4140-93B2-3F659D2CA890}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|Any CPU = Release|Any CPU
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|Any CPU.Build.0 = Debug|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|x64.ActiveCfg = Debug|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|x64.Build.0 = Debug|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|x86.ActiveCfg = Debug|Win32
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Debug|x86.Build.0 = Debug|Win32
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|Any CPU.ActiveCfg = Release|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|Any CPU.Build.0 = Release|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|x64.ActiveCfg = Release|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|x64.Build.0 = Release|x64
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|x86.ActiveCfg = Release|Win32
+ {79ABB953-907D-469F-9723-42ED1915A4B0}.Release|x86.Build.0 = Release|Win32
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Release|x64.ActiveCfg = Release|Any CPU
+ {99206FE6-CF46-4140-93B2-3F659D2CA890}.Release|x86.ActiveCfg = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {F504DBBF-809B-49DA-9604-93EE9EB84AB4}
+ EndGlobalSection
+EndGlobal
diff --git a/CPythonTensor/CPythonTensor.vcxproj b/CPythonTensor/CPythonTensor.vcxproj
new file mode 100644
index 0000000..e4b0db5
--- /dev/null
+++ b/CPythonTensor/CPythonTensor.vcxproj
@@ -0,0 +1,154 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+
+
+
+ 17.0
+ Win32Proj
+ {79abb953-907d-469f-9723-42ed1915a4b0}
+ CPythonTensor
+ 10.0
+
+
+
+ Application
+ true
+ v143
+ Unicode
+
+
+ Application
+ false
+ v143
+ true
+ Unicode
+
+
+ DynamicLibrary
+ v143
+ Unicode
+
+
+ DynamicLibrary
+ v143
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ .pyd
+ tensor_bind
+
+
+ .pyd
+ tensor_bind
+
+
+
+ Level3
+ true
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ true
+
+
+ Console
+ true
+
+
+
+
+ Level3
+ true
+ true
+ true
+ WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ true
+
+
+ Console
+ true
+ true
+ true
+
+
+
+
+ Level3
+ true
+ _DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ true
+ ..\..\TensorCore\TensorCore;C:\Users\Noob\AppData\Local\Programs\Python\Python311\include;C:\Users\Noob\AppData\Local\Programs\Python\Python311\Lib\site-packages\numpy\core\include;C:\Users\Noob\AppData\Local\Programs\Python\Python311\Lib\site-packages\pybind11\include;%(AdditionalIncludeDirectories)
+ stdcpp17
+
+
+ NotSet
+ true
+ ..\..\TensorCore\$(IntDir);C:\Users\Noob\AppData\Local\Programs\Python\Python311\libs;%(AdditionalLibraryDirectories)
+ python311.lib;TensorCore.lib;%(AdditionalDependencies)
+
+
+ xcopy /y /d "..\..\TensorCore\$(IntDir)TensorCore.dll" "$(OutDir)"
+
+
+
+
+ Level3
+ true
+ true
+ NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ true
+ ..\..\TensorCore\TensorCore;C:\Users\Noob\AppData\Local\Programs\Python\Python311\include;C:\Users\Noob\AppData\Local\Programs\Python\Python311\Lib\site-packages\numpy\core\include;C:\Users\Noob\AppData\Local\Programs\Python\Python311\Lib\site-packages\pybind11\include;%(AdditionalIncludeDirectories)
+ stdcpp17
+
+
+ NotSet
+ true
+ true
+ true
+ ..\..\TensorCore\$(IntDir);C:\Users\Noob\AppData\Local\Programs\Python\Python311\libs;%(AdditionalLibraryDirectories)
+ python311.lib;TensorCore.lib;%(AdditionalDependencies)
+
+
+ xcopy /y /d "..\..\TensorCore\$(IntDir)TensorCore.dll" "$(OutDir)"
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CPythonTensor/CPythonTensor.vcxproj.filters b/CPythonTensor/CPythonTensor.vcxproj.filters
new file mode 100644
index 0000000..fed9ccb
--- /dev/null
+++ b/CPythonTensor/CPythonTensor.vcxproj.filters
@@ -0,0 +1,22 @@
+
+
+
+
+ {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
+ cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx
+
+
+ {93995380-89BD-4b04-88EB-625FBE52EBFB}
+ h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd
+
+
+ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
+ rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
+
+
+
+
+ Source Files
+
+
+
\ No newline at end of file
diff --git a/CPythonTensor/py_tensor.cpp b/CPythonTensor/py_tensor.cpp
new file mode 100644
index 0000000..bc68efc
--- /dev/null
+++ b/CPythonTensor/py_tensor.cpp
@@ -0,0 +1,30 @@
+#include "module.h"
+#include <sstream>
+#include <string>
+#include <typeinfo>
+
+using namespace tensor_array::value;
+
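+// C-style helpers that expose Tensor creation, addition, deletion and printing
+// through opaque void* handles.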
+void* call_tensor(unsigned int nd, unsigned int* dimensions, const void* data)
+{
+    return new Tensor(TensorBase(typeid(float), std::initializer_list<unsigned int>(dimensions, dimensions + nd), data, tensor_array::devices::DEVICE_CPU_0));
+}
+
+void delete_tensor(void* t)
+{
+    // t is the Tensor* produced by call_tensor; deleting through void* is undefined behavior.
+    delete static_cast<Tensor*>(t);
+}
+
+void* add_tensor(const void* a, const void* b)
+{
+    const Tensor* t_a = static_cast<const Tensor*>(a);
+    const Tensor* t_b = static_cast<const Tensor*>(b);
+    return new Tensor(add(*t_a, *t_b));
+}
+
+const char* to_string(void* t)
+{
+    Tensor* t1 = static_cast<Tensor*>(t);
+    std::ostringstream stream;
+    stream << *t1;
+    // Keep the text alive after return: stream.str().c_str() alone would point
+    // into a temporary std::string that is destroyed when this function exits.
+    static thread_local std::string repr;
+    repr = stream.str();
+    return repr.c_str();
+}
diff --git a/CPythonTensor/tensor_bind.cc b/CPythonTensor/tensor_bind.cc
new file mode 100644
index 0000000..2ecc051
--- /dev/null
+++ b/CPythonTensor/tensor_bind.cc
@@ -0,0 +1,56 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+#include <pybind11/operators.h>
+#include <algorithm>
+#include <sstream>
+#include <vector>
+// Tensor, TensorBase, add and matmul are declared by the Tensor-Array (TensorCore) headers.
+
+using namespace tensor_array::value;
+
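+// Builds a TensorBase view of a NumPy array: copies its shape into unsigned
+// ints and hands the raw data pointer to the Tensor-Array runtime.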
+template<typename T>
+TensorBase convert_numpy_to_tensor_base(pybind11::array_t<T> py_buf)
+{
+    pybind11::buffer_info info = py_buf.request();
+    std::vector<unsigned int> shape_vec(info.ndim);
+    std::transform
+    (
+        info.shape.cbegin(),
+        info.shape.cend(),
+        shape_vec.begin(),
+        [](pybind11::size_t dim)
+        {
+            return static_cast<unsigned int>(dim);
+        }
+    );
+    return TensorBase(typeid(T), shape_vec, info.ptr);
+}
+
+std::string tensor_to_string(const Tensor t)
+{
+ std::ostringstream osstream;
+ osstream << t;
+ return osstream.str();
+}
+
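+// Module definition: TensorC wraps tensor_array::value::Tensor and exposes the
+// arithmetic, comparison, matmul and repr operators used by PyTensorArray.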
+PYBIND11_MODULE(tensor_bind, m)
+{
+    pybind11::class_<Tensor>(m, "TensorC")
+    .def(pybind11::init<>())
+    // Assumption: the NumPy-array constructor is instantiated for float data here.
+    .def(pybind11::init(&convert_numpy_to_tensor_base<float>))
+ .def(pybind11::self + pybind11::self)
+ .def(pybind11::self - pybind11::self)
+ .def(pybind11::self * pybind11::self)
+ .def(pybind11::self / pybind11::self)
+ .def(pybind11::self += pybind11::self)
+ .def(pybind11::self -= pybind11::self)
+ .def(pybind11::self *= pybind11::self)
+ .def(pybind11::self /= pybind11::self)
+ .def(pybind11::self == pybind11::self)
+ .def(pybind11::self != pybind11::self)
+ .def(pybind11::self >= pybind11::self)
+ .def(pybind11::self <= pybind11::self)
+ .def(pybind11::self > pybind11::self)
+ .def(pybind11::self < pybind11::self)
+ .def(+pybind11::self)
+ .def(-pybind11::self)
+ .def("__matmul__", &matmul)
+ .def("__repr__", &tensor_to_string);
+}
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 0422de0..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2024 TensorArray-Creators
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/PythonTensorTesting/PyTensorArray/__init__.py b/PythonTensorTesting/PyTensorArray/__init__.py
new file mode 100644
index 0000000..0e34f41
--- /dev/null
+++ b/PythonTensorTesting/PyTensorArray/__init__.py
@@ -0,0 +1 @@
+from .tensor import *
\ No newline at end of file
diff --git a/PythonTensorTesting/PyTensorArray/tensor.py b/PythonTensorTesting/PyTensorArray/tensor.py
new file mode 100644
index 0000000..285a6a2
--- /dev/null
+++ b/PythonTensorTesting/PyTensorArray/tensor.py
@@ -0,0 +1,11 @@
+import numpy as np
+import tensor_bind as t
+
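+# The decorator ignores the decorated class and returns the pybind11 TensorC
+# type, so PyTensorArray.Tensor is effectively an alias for the C++ binding.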
+def tensor_decorator(cls):
+ return t.TensorC
+
+@tensor_decorator
+class Tensor:
+ def __init__(self):
+ pass
+
diff --git a/PythonTensorTesting/PythonTensorTesting.pyproj b/PythonTensorTesting/PythonTensorTesting.pyproj
new file mode 100644
index 0000000..cbd915e
--- /dev/null
+++ b/PythonTensorTesting/PythonTensorTesting.pyproj
@@ -0,0 +1,60 @@
+
+
+ Debug
+ 2.0
+ 99206fe6-cf46-4140-93b2-3f659d2ca890
+ .
+ main.py
+
+
+ .
+ .
+ PythonTensorTesting
+ PythonTensorTesting
+ False
+ Standard Python launcher
+ True
+ none
+ test*.py
+ .
+ -i
+
+
+ CondaEnv|CondaEnv|env
+
+
+ true
+ false
+
+
+ true
+ false
+
+
+
+
+
+
+
+
+ CPythonTensor
+ {79abb953-907d-469f-9723-42ed1915a4b0}
+ True
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/PythonTensorTesting/main.py b/PythonTensorTesting/main.py
new file mode 100644
index 0000000..d57f07c
--- /dev/null
+++ b/PythonTensorTesting/main.py
@@ -0,0 +1,14 @@
+import PyTensorArray as py_t_arr
+
+import numpy as np
+
+if __name__ == '__main__':
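+    # Build two tensors from the same NumPy array and exercise the bound operators.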
+ t4 = np.array([[1, 2, 3], [4, 5, 6]])
+ t1 = py_t_arr.tensor.Tensor(t4)
+ t2 = py_t_arr.tensor.Tensor(t4)
+ t3 = t1 + t2
+ print("Hello")
+ print(t1)
+ print(t2)
+ print(t3)
+ print(t1 == t2)
diff --git a/README.md b/README.md
deleted file mode 100644
index 877b010..0000000
--- a/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Tensor Array Python
-[](https://pypi.org/project/TensorArray/)
-[](https://pypi.org/project/TensorArray/)
-[](https://pypi.org/project/TensorArray/)
-[](https://pypi.org/project/TensorArray/)
-[](#)
-
-This machine learning library using [Tensor-Array](https://github.com/Tensor-Array/Tensor-Array) library
-
-This project is still in alpha version, we are trying to make this look like the main framework but it is easier to code.
-
-## How to install Tensor-Array python version.
-```shell
-$ pip install TensorArray
-```
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 23c27f3..0000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[build-system]
-requires = [
- "setuptools",
- "wheel",
- "ninja",
- "cmake"
-]
-build-backend = "setuptools.build_meta"
diff --git a/setup.py b/setup.py
deleted file mode 100644
index a981b6e..0000000
--- a/setup.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import os
-import re
-import subprocess
-import sys
-from pathlib import Path
-
-from setuptools import Extension, setup, find_packages
-from setuptools.command.build_ext import build_ext
-
-# Convert distutils Windows platform specifiers to CMake -A arguments
-PLAT_TO_CMAKE = {
- "win32": "Win32",
- "win-amd64": "x64",
- "win-arm32": "ARM",
- "win-arm64": "ARM64",
-}
-
-
-# A CMakeExtension needs a sourcedir instead of a file list.
-# The name must be the _single_ output extension from the CMake build.
-# If you need multiple extensions, see scikit-build.
-class CMakeExtension(Extension):
- def __init__(self, name: str, sourcedir: str = "") -> None:
- super().__init__(name, sources=[])
- self.sourcedir = os.fspath(Path(sourcedir).resolve())
-
-
-class CMakeBuild(build_ext):
- def build_extension(self, ext: CMakeExtension) -> None:
- # Must be in this form due to bug in .resolve() only fixed in Python 3.10+
- ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name)
- extdir = ext_fullpath.parent.resolve()
-
- # Using this requires trailing slash for auto-detection & inclusion of
- # auxiliary "native" libs
-
- debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
- cfg = "Debug" if debug else "Release"
-
- # CMake lets you override the generator - we need to check this.
- # Can be set with Conda-Build, for example.
- cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
-
- # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
- # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
- # from Python.
- cmake_args = [
- f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
- f"-DPYTHON_EXECUTABLE={sys.executable}",
- f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
- ]
- build_args = []
- # Adding CMake arguments set as environment variable
- # (needed e.g. to build for ARM OSx on conda-forge)
- if "CMAKE_ARGS" in os.environ:
- cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
-
- # In this example, we pass in the version to C++. You might not need to.
- cmake_args += [f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"]
-
- if self.compiler.compiler_type != "msvc":
- # Using Ninja-build since it a) is available as a wheel and b)
- # multithreads automatically. MSVC would require all variables be
- # exported for Ninja to pick it up, which is a little tricky to do.
- # Users can override the generator with CMAKE_GENERATOR in CMake
- # 3.15+.
- if not cmake_generator or cmake_generator == "Ninja":
- try:
- import ninja
-
- ninja_executable_path = Path(ninja.BIN_DIR) / "ninja"
- cmake_args += [
- "-GNinja",
- f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
- ]
- except ImportError:
- pass
-
- else:
- # Single config generators are handled "normally"
- single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
-
- # CMake allows an arch-in-generator style for backward compatibility
- contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
-
- # Specify the arch if using MSVC generator, but only if it doesn't
- # contain a backward-compatibility arch spec already in the
- # generator name.
- if not single_config and not contains_arch:
- cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
-
- # Multi-config generators have a different way to specify configs
- if not single_config:
- cmake_args += [
- f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
- ]
- build_args += ["--config", cfg]
-
- if sys.platform.startswith("darwin"):
- # Cross-compile support for macOS - respect ARCHFLAGS if set
- archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
- if archs:
- cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
-
- # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
- # across all generators.
- if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
- # self.parallel is a Python 3 only way to set parallel jobs by hand
- # using -j in the build_ext call, not supported by pip or PyPA-build.
- if hasattr(self, "parallel") and self.parallel:
- # CMake 3.12+ only.
- build_args += [f"-j{self.parallel}"]
-
- build_temp = Path(self.build_temp) / ext.name
- if not build_temp.exists():
- build_temp.mkdir(parents=True)
-
- subprocess.run(
- ["cmake", ext.sourcedir, *cmake_args], cwd=build_temp, check=True
- )
- subprocess.run(
- ["cmake", "--build", ".", *build_args], cwd=build_temp, check=True
- )
-
-def main():
- cwd = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(cwd, "README.md"), encoding="utf-8") as f:
- long_description = f.read()
-
- packages = find_packages("src")
-
- print(packages)
-
- setup(
- name = "TensorArray",
- version = "0.0.1a3",
- description = "A machine learning package",
- long_description=long_description,
- authors = "TensorArray-Creators",
- url= "https://github.com/Tensor-Array/Tensor-Array-Python",
- packages=packages,
- ext_modules=[
- CMakeExtension("tensor_array/core/tensor2")
- ],
- classifiers = [
- "Development Status :: 2 - Pre-Alpha",
-
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
- "Programming Language :: Python :: 3.12",
- "Programming Language :: Python :: 3.13",
-
- "License :: OSI Approved :: MIT License",
-
- "Environment :: GPU :: NVIDIA CUDA :: 12",
- ],
- license="MIT",
- cmdclass={
- "build_ext": CMakeBuild
- },
- package_dir={
- "": "src",
- "tests": "tests"
- },
- python_requires=">=3.8",
- )
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/src/tensor_array/__init__.py b/src/tensor_array/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/tensor_array/_core/tensor_bind.cc b/src/tensor_array/_core/tensor_bind.cc
deleted file mode 100644
index 01406d1..0000000
--- a/src/tensor_array/_core/tensor_bind.cc
+++ /dev/null
@@ -1,239 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-
-using namespace tensor_array::value;
-using namespace tensor_array::datatype;
-
-template
-TensorBase convert_numpy_to_tensor_base(pybind11::array_t py_buf)
-{
- pybind11::buffer_info info = py_buf.request();
- std::vector shape_vec(info.ndim);
- std::transform
- (
- info.shape.cbegin(),
- info.shape.cend(),
- shape_vec.begin(),
- [](pybind11::size_t dim)
- {
- return static_cast(dim);
- }
- );
- return TensorBase(warp_type(warp_type(typeid(T))), shape_vec, info.ptr);
-}
-
-pybind11::dtype get_py_type(const std::type_info& info)
-{
- if (info == typeid(std::int8_t))
- return pybind11::dtype::of();
- if (info == typeid(std::int16_t))
- return pybind11::dtype::of();
- if (info == typeid(std::int32_t))
- return pybind11::dtype::of();
- if (info == typeid(std::int64_t))
- return pybind11::dtype::of();
- if (info == typeid(std::uint8_t))
- return pybind11::dtype::of();
- if (info == typeid(std::uint16_t))
- return pybind11::dtype::of();
- if (info == typeid(std::uint32_t))
- return pybind11::dtype::of();
- if (info == typeid(std::uint64_t))
- return pybind11::dtype::of();
- if (info == typeid(bool))
- return pybind11::dtype::of();
- if (info == typeid(float))
- return pybind11::dtype::of();
- if (info == typeid(double))
- return pybind11::dtype::of();
- throw std::runtime_error("no dtype");
-}
-
-pybind11::array convert_tensor_to_numpy(const Tensor& self)
-{
- const TensorBase& base_tensor = self.get_buffer().change_device({tensor_array::devices::CPU, 0});
- std::vector shape_vec(base_tensor.shape().size());
- std::transform
- (
- base_tensor.shape().begin(),
- base_tensor.shape().end(),
- shape_vec.begin(),
- [](unsigned int dim)
- {
- return static_cast(dim);
- }
- );
- auto ty0 = pybind11::detail::get_type_info(base_tensor.type());
- pybind11::dtype ty1 = get_py_type(base_tensor.type());
- return pybind11::array(ty1, shape_vec, base_tensor.data());
-}
-
-Tensor python_tuple_slice(const Tensor& self, pybind11::tuple tuple_slice)
-{
- std::vector t_slices;
- for (size_t i = 0; i < tuple_slice.size(); i++)
- {
- ssize_t start, stop, step;
- ssize_t length;
- pybind11::slice py_slice = tuple_slice[i].cast();
- if (!py_slice.compute(self.get_buffer().shape().begin()[i], &start, &stop, &step, &length))
- throw std::runtime_error("Invalid slice");
- t_slices.insert
- (
- t_slices.begin() + i,
- Tensor::Slice
- {
- static_cast(start),
- static_cast(stop),
- static_cast(step)
- }
- );
- }
- return self[tensor_array::wrapper::initializer_wrapper(t_slices.begin().operator->(), t_slices.end().operator->())];
-}
-
-Tensor python_slice(const Tensor& self, pybind11::slice py_slice)
-{
- std::vector t_slices;
- ssize_t start, stop, step;
- ssize_t length;
- if (!py_slice.compute(self.get_buffer().shape().begin()[0], &start, &stop, &step, &length))
- throw std::runtime_error("Invalid slice");
- return self
- [
- {
- Tensor::Slice
- {
- static_cast(start),
- static_cast(stop),
- static_cast(step)
- }
- }
- ];
-}
-
-Tensor python_index(const Tensor& self, unsigned int i)
-{
- return self[i];
-}
-
-std::size_t python_len(const Tensor& self)
-{
- std::initializer_list shape_list = self.get_buffer().shape();
- return shape_list.size() != 0 ? shape_list.begin()[0]: 1U;
-}
-
-pybind11::str tensor_to_string(const Tensor& self)
-{
- return pybind11::repr(convert_tensor_to_numpy(self));
-}
-
-Tensor tensor_cast_1(const Tensor& self, DataType dtype)
-{
- return self.tensor_cast(warp_type(dtype));
-}
-
-pybind11::tuple tensor_shape(const Tensor& self)
-{
- return pybind11::cast(std::vector(self.get_buffer().shape()));
-}
-
-DataType tensor_type(const Tensor& self)
-{
- return warp_type(self.get_buffer().type());
-}
-
-Tensor tensor_copying(const Tensor& self)
-{
- return self;
-}
-
-Tensor py_zeros(pybind11::tuple shape_tuple, DataType dtype)
-{
- std::vector shape_vec;
- for (auto& it: shape_tuple)
- shape_vec.push_back(it.cast());
- return TensorBase(warp_type(dtype), shape_vec);
-}
-
-PYBIND11_MODULE(tensor2, m)
-{
- pybind11::enum_(m, "DataType")
- .value("BOOL", BOOL_DTYPE)
- .value("S_INT_8", S_INT_8)
- .value("S_INT_16", S_INT_16)
- .value("S_INT_32", S_INT_32)
- .value("S_INT_64", S_INT_64)
- .value("FLOAT", FLOAT_DTYPE)
- .value("DOUBLE", DOUBLE_DTYPE)
- .value("HALF", HALF_DTYPE)
- .value("BFLOAT16", BF16_DTYPE)
- .value("U_INT_8", U_INT_8)
- .value("U_INT_16", U_INT_16)
- .value("U_INT_32", U_INT_32)
- .value("U_INT_64", U_INT_64)
- .export_values();
-
- m.def
- (
- "zeros",
- &py_zeros,
- pybind11::arg("shape"),
- pybind11::arg("dtype") = S_INT_32
- );
-
- pybind11::class_(m, "Tensor")
- .def(pybind11::init())
- .def(pybind11::init(&tensor_copying))
- .def(pybind11::init(&convert_numpy_to_tensor_base))
- .def(pybind11::self + pybind11::self)
- .def(pybind11::self - pybind11::self)
- .def(pybind11::self * pybind11::self)
- .def(pybind11::self / pybind11::self)
- .def(pybind11::self += pybind11::self)
- .def(pybind11::self -= pybind11::self)
- .def(pybind11::self *= pybind11::self)
- .def(pybind11::self /= pybind11::self)
- .def(pybind11::self == pybind11::self)
- .def(pybind11::self != pybind11::self)
- .def(pybind11::self >= pybind11::self)
- .def(pybind11::self <= pybind11::self)
- .def(pybind11::self > pybind11::self)
- .def(pybind11::self < pybind11::self)
- .def(+pybind11::self)
- .def(-pybind11::self)
- .def(hash(pybind11::self))
- .def("transpose", &Tensor::transpose)
- .def("calc_grad", &Tensor::calc_grad)
- .def("get_grad", &Tensor::get_grad)
- .def("sin", &Tensor::sin)
- .def("sin", &Tensor::sin)
- .def("cos", &Tensor::cos)
- .def("tan", &Tensor::tan)
- .def("sinh", &Tensor::sinh)
- .def("cosh", &Tensor::cosh)
- .def("tanh", &Tensor::tanh)
- .def("log", &Tensor::log)
- .def("clone", &Tensor::clone)
- .def("cast", &tensor_cast_1)
- .def("add", &add)
- .def("multiply", &multiply)
- .def("divide", ÷)
- .def("matmul", &matmul)
- .def("condition", &condition)
- .def("numpy", &convert_tensor_to_numpy)
- .def("shape", &tensor_shape)
- .def("dtype", &tensor_type)
- .def("__getitem__", &python_index)
- .def("__getitem__", &python_slice)
- .def("__getitem__", &python_tuple_slice)
- .def("__len__", &python_len)
- .def("__matmul__", &matmul)
- .def("__rmatmul__", &matmul)
- .def("__repr__", &tensor_to_string)
- .def("__copy__", &tensor_copying);
-}
\ No newline at end of file
diff --git a/src/tensor_array/activation.py b/src/tensor_array/activation.py
deleted file mode 100644
index a90fdcb..0000000
--- a/src/tensor_array/activation.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from tensor_array.core import Tensor
-from tensor_array.core import zeros
-
-def relu(input):
- tensor_zeros = zeros(shape = input.shape(), dtype = input.dtype())
- return (input > tensor_zeros).condition(input, tensor_zeros)
-
-def sigmoid(input):
- return input.sigmoid()
-
-def softmax(input, dim = 0):
- return input
diff --git a/src/tensor_array/core/__init__.py b/src/tensor_array/core/__init__.py
deleted file mode 100644
index 1d55471..0000000
--- a/src/tensor_array/core/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from tensor_array.core.tensor2 import Tensor
-from tensor_array.core.tensor2 import zeros
-from tensor_array.core.tensor2 import DataType
diff --git a/src/tensor_array/layers/__init__.py b/src/tensor_array/layers/__init__.py
deleted file mode 100644
index dfa4a1b..0000000
--- a/src/tensor_array/layers/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .layer import Layer
-from .parameter import Parameter
\ No newline at end of file
diff --git a/src/tensor_array/layers/attention/__init__.py b/src/tensor_array/layers/attention/__init__.py
deleted file mode 100644
index 59a815f..0000000
--- a/src/tensor_array/layers/attention/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from tensor_array.layers.attention.attention import MultiheadAttention
\ No newline at end of file
diff --git a/src/tensor_array/layers/attention/attention.py b/src/tensor_array/layers/attention/attention.py
deleted file mode 100644
index 0889971..0000000
--- a/src/tensor_array/layers/attention/attention.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from typing import Any
-from .. import Layer
-from ..util import Linear
-from tensor_array.core import Tensor
-from tensor_array.activation import softmax
-
-def scaled_dot_product_attention(q, k, v, mask = None):
- attn_scores = q @ k.transpose(len(k.shape()) - 2, len(k.shape()) - 1)
- attn_probs = softmax(attn_scores, len(attn_scores.shape()) - 1)
- return attn_probs @ v
-
-class MultiheadAttention(Layer):
- def __init__(self, d_model, n_head) -> None:
- super().__init__()
- self.linear_q = Linear(d_model)
- self.linear_k = Linear(d_model)
- self.linear_v = Linear(d_model)
- self.linear_o = Linear(d_model)
- self.n_head = n_head
-
- def calculate(self, input_q, input_k, input_v, mask = None) -> Any:
- temp_q = self.linear_q(input_q)
- temp_k = self.linear_k(input_k)
- temp_v = self.linear_v(input_v)
-
- temp_q = temp_q.reshape((temp_q.shape()[0], temp_q.shape()[1], self.n_head, temp_q.shape()[-1] / self.n_head)).transpose(1, 2)
- temp_k = temp_k.reshape((temp_k.shape()[0], temp_k.shape()[1], self.n_head, temp_k.shape()[-1] / self.n_head)).transpose(1, 2)
- temp_v = temp_v.reshape((temp_v.shape()[0], temp_v.shape()[1], self.n_head, temp_v.shape()[-1] / self.n_head)).transpose(1, 2)
-
- attention_output = scaled_dot_product_attention(temp_q, temp_k, temp_v, mask)
-
- attention_output = attention_output.transpose(1, 2)
- attention_output = attention_output.reshape((temp_q.shape()[0], temp_q.shape()[1], temp_q.shape[-2] * temp_q.shape[-1]))
- return self.linear_o(attention_output)
diff --git a/src/tensor_array/layers/attention/transformer.py b/src/tensor_array/layers/attention/transformer.py
deleted file mode 100644
index f8e1d87..0000000
--- a/src/tensor_array/layers/attention/transformer.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from typing import Any
-from .. import Layer
-from .attention import MultiheadAttention
-from tensor_array.activation import relu
-from ..util import Sequential
-from ..util import Linear
-from ..util import Activation
-
-class TransformerEncoderImpl(Layer):
- def __init__(self, d_model, n_head, ff_size) -> None:
- self.feed_forward = Sequential([
- Linear(ff_size),
- Activation(relu),
- Linear(d_model)
- ])
- self.multihead_attn = MultiheadAttention(d_model, n_head)
- self.layer_norm_1
- self.layer_norm_2
-
- def calculate(self, input) -> Any:
- attn_output = self.multihead_attn(input, input, input)
- attn_output = self.layer_norm_1(input + attn_output)
- ff_output = self.feed_forward(attn_output)
- return self.layer_norm_2(attn_output + ff_output)
\ No newline at end of file
diff --git a/src/tensor_array/layers/layer.py b/src/tensor_array/layers/layer.py
deleted file mode 100644
index e7f405c..0000000
--- a/src/tensor_array/layers/layer.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from collections import OrderedDict, namedtuple
-from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
-from typing import Any
-from tensor_array.core import Tensor
-from .parameter import Parameter
-
-class Layer:
- """
- Layer class
- """
- is_running: bool
- _layers: Dict[str, Optional['Layer']]
- _parameters: Dict[str, Optional[Parameter]]
- _tensors: Dict[str, Optional[Tensor]]
-
- def __init__(self) -> None:
- super().__setattr__('is_running', False)
- super().__setattr__('_layers', OrderedDict())
- super().__setattr__('_parameters', OrderedDict())
- super().__setattr__('_tensors', OrderedDict())
-
- def __call__(self, *args: Any, **kwds: Any) -> Any:
- if not self.__dict__['is_running']:
- list_arg = (t.shape() for t in args if isinstance(t, Tensor))
- dict_kwargs = {
- key: val.shape()
- for key, val in kwds
- if isinstance(val, Tensor)
- }
- self.layer_init(*list_arg, **dict_kwargs)
- super().__setattr__('is_running', True)
- return self.calculate(*args, **kwds)
-
- def layer_init(self, *args: Tuple, **kwds: Tuple) -> None:
- pass
-
- def calculate(self, *args: Any, **kwds: Any) -> Any:
- pass
-
- def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
- if '_parameters' not in self.__dict__:
- raise AttributeError("cannot assign parameter before Module.__init__() call")
- elif not isinstance(name, str):
- raise TypeError(f"parameter name should be a string. Got {name}")
- elif '.' in name:
- raise KeyError("parameter name can't contain \".\"")
- elif name == '':
- raise KeyError("parameter name can't be empty string \"\"")
- elif hasattr(self, name) and name not in self._parameters:
- raise KeyError(f"attribute '{name}' already exists")
- elif not isinstance(param, Parameter) and param is not None:
- raise TypeError(f"cannot assign '{param}' object to parameter '{name}' "
- "(tensor_array.util.Parameter or None required)")
- else:
- self._parameters[name] = param
-
- def register_tensor(self, name: str, param: Optional[Tensor]) -> None:
- if '_tensors' not in self.__dict__:
- raise AttributeError("cannot assign tensor before Module.__init__() call")
- elif not isinstance(name, str):
- raise TypeError(f"tensor name should be a string. Got {name}")
- elif '.' in name:
- raise KeyError("tensor name can't contain \".\"")
- elif name == '':
- raise KeyError("tensor name can't be empty string \"\"")
- elif hasattr(self, name) and name not in self._tensors:
- raise KeyError(f"attribute '{name}' already exists")
- elif not isinstance(param, Tensor) and param is not None:
- raise TypeError(f"cannot assign '{param}' object to parameter '{name}' "
- "(tensor_array.core.tensor2.Tensor or None required)")
- else:
- self._tensors[name] = param
-
- def register_layer(self, name: str, layer: Optional['Layer']) -> None:
- if not isinstance(layer, Layer) and layer is not None:
- raise TypeError(f"{layer} is not a Layer subclass")
- elif not isinstance(name, str):
- raise TypeError(f"layer name should be a string. Got {name}")
- elif hasattr(self, name) and name not in self._layers:
- raise KeyError(f"attribute '{name}' already exists")
- elif '.' in name:
- raise KeyError(f"layer name can't contain \".\", got: {name}")
- elif name == '':
- raise KeyError("layer name can't be empty string \"\"")
- self._layers[name] = layer
-
- def __setattr__(self, __name: str, __value: Any) -> None:
- def remove_from(*dicts_or_sets):
- for d in dicts_or_sets:
- if __name in d:
- if isinstance(d, dict):
- del d[__name]
- else:
- d.discard(__name)
-
- params = self.__dict__.get('_parameters')
- layers = self.__dict__.get('_layers')
- tensors = self.__dict__.get('_tensors')
- if (params is not None and __name in params) or (layers is not None and __name in layers) or (tensors is not None and __name in tensors):
- raise TypeError(f"cannot assign '{__value}' as parameter '{__name}'")
- elif isinstance(__value, Parameter):
- if params is None:
- raise AttributeError("cannot assign parameters before Layer.__init__() call")
- remove_from(self.__dict__, self._layers, self._tensors)
- self.register_parameter(__name, __value)
- elif isinstance(__value, Tensor):
- if layers is None:
- raise AttributeError("cannot assign layers before Layer.__init__() call")
- remove_from(self.__dict__, self._parameters, self._layers)
- self.register_tensor(__name, __value)
- elif isinstance(__value, Layer):
- if tensors is None:
- raise AttributeError("cannot assign layers before Layer.__init__() call")
- remove_from(self.__dict__, self._parameters, self._tensors)
- self.register_layer(__name, __value)
- else:
- super().__setattr__(__name, __value)
-
- def __getattr__(self, __name: str) -> Any:
- if '_parameters' in self.__dict__:
- _parameters = self.__dict__['_parameters']
- if __name in _parameters:
- return _parameters[__name]
- if '_tensors' in self.__dict__:
- _tensors = self.__dict__['_tensors']
- if __name in _tensors:
- return _tensors[__name]
- if '_layers' in self.__dict__:
- _layers = self.__dict__['_layers']
- if __name in _layers:
- return _layers[__name]
- return super().__getattr__(__name)
-
- def __delattr__(self, __name: str) -> None:
- if __name in self._parameters:
- del self._parameters[__name]
- elif __name in self._tensors:
- del self._tensors[__name]
- elif __name in self._layers:
- del self._layers[__name]
- else:
- super().__delattr__(__name)
diff --git a/src/tensor_array/layers/normalization/__init__.py b/src/tensor_array/layers/normalization/__init__.py
deleted file mode 100644
index 32af77a..0000000
--- a/src/tensor_array/layers/normalization/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from tensor_array.layers.normalization import Normalization
\ No newline at end of file
diff --git a/src/tensor_array/layers/normalization/normalization.py b/src/tensor_array/layers/normalization/normalization.py
deleted file mode 100644
index db424d9..0000000
--- a/src/tensor_array/layers/normalization/normalization.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .. import Layer
-
-class Normalization(Layer):
- pass
\ No newline at end of file
diff --git a/src/tensor_array/layers/parameter.py b/src/tensor_array/layers/parameter.py
deleted file mode 100644
index 54b5640..0000000
--- a/src/tensor_array/layers/parameter.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from tensor_array.core import Tensor
-
-class Parameter(Tensor):
- pass
diff --git a/src/tensor_array/layers/util/__init__.py b/src/tensor_array/layers/util/__init__.py
deleted file mode 100644
index 3cfe48a..0000000
--- a/src/tensor_array/layers/util/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from tensor_array.layers.util.activation import Activation
-from tensor_array.layers.util.linear import Linear
-from tensor_array.layers.util.sequential import Sequential
diff --git a/src/tensor_array/layers/util/activation.py b/src/tensor_array/layers/util/activation.py
deleted file mode 100644
index 0117ef5..0000000
--- a/src/tensor_array/layers/util/activation.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .. import Layer
-from typing import Any, Callable
-
-class Activation(Layer):
- def __init__(self, activation_function: Callable) -> None:
- super().__init__()
- self.activation_function = activation_function
-
- def calculate(self, *args: Any, **kwds: Any) -> Any:
- return self.activation_function(*args, **kwds)
\ No newline at end of file
diff --git a/src/tensor_array/layers/util/linear.py b/src/tensor_array/layers/util/linear.py
deleted file mode 100644
index 75cb0dd..0000000
--- a/src/tensor_array/layers/util/linear.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from .. import Layer
-from .. import Parameter
-from tensor_array.core import Tensor
-from tensor_array.core import zeros
-from tensor_array.core import DataType
-from typing import Any
-
-
-class Linear(Layer):
- def __init__(self, bias) -> None:
- super().__init__()
- self.bias_shape = bias
- self.b = Parameter(zeros(shape = (bias,), dtype = DataType.FLOAT))
-
- def layer_init(self, t):
- self.w = Parameter(zeros(shape = (t[-1], self.bias_shape), dtype = DataType.FLOAT))
-
- def calculate(self, t):
- return t @ self.w + self.b
-
\ No newline at end of file
diff --git a/src/tensor_array/layers/util/sequential.py b/src/tensor_array/layers/util/sequential.py
deleted file mode 100644
index 4edd36b..0000000
--- a/src/tensor_array/layers/util/sequential.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .. import Layer
-from .. import Parameter
-from tensor_array.core import Tensor
-from tensor_array.core import zeros
-from tensor_array.core import DataType
-from typing import Any, List, OrderedDict
-
-
-class Sequential(Layer):
- def __init__(self, _layers: OrderedDict[str, Layer]) -> None:
- self._layers = _layers
-
- def calculate(self, t):
- tensorloop = t
- for _, content in self._layers:
- tensorloop = content(tensorloop)
- return tensorloop
\ No newline at end of file
diff --git a/tensor-array-repo/Tensor-Array b/tensor-array-repo/Tensor-Array
deleted file mode 160000
index 6351b0a..0000000
--- a/tensor-array-repo/Tensor-Array
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6351b0ab48aa14d8ee238d2b3f1fef52cc3531d3
diff --git a/third_party/pybind11 b/third_party/pybind11
deleted file mode 160000
index aec6cc5..0000000
--- a/third_party/pybind11
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit aec6cc5406edb076f5a489c2d7f84bb07052c4a3