
Use mlir-python-bindings package instead of manually built bindings #15

Merged: 1 commit, Apr 21, 2023
64 changes: 0 additions & 64 deletions README.md
@@ -49,67 +49,3 @@ array([0, 1], dtype=uint64)
>>> values
array([ 8. , 33.625])
```

# Building MLIR

Until LLVM 16.0 is released, the required MLIR operations in the `sparse_tensor` dialect are only
available on the main branch of the LLVM repo, so there are no prebuilt packages to install
from conda-forge.

To build locally, download the LLVM repo from GitHub.

```
git clone https://github.com/llvm/llvm-project.git
```

Next, create a conda environment for building the project.

```
conda create -n mlir_build_env -y
conda activate mlir_build_env
conda install python=3.10 numpy pyyaml cmake ninja pybind11 python-mlir-graphblas
```

Define `PREFIX` as the location of your conda environment (the active env location reported by `conda info`).
Then run cmake.

```
export PREFIX=/location/to/your/conda/environment

cd llvm-project
mkdir build
cd build

cmake -G Ninja ../llvm \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DLLVM_ENABLE_PROJECTS=mlir \
-DLLVM_BUILD_EXAMPLES=ON \
-DLLVM_INSTALL_UTILS=ON \
-DLLVM_TARGETS_TO_BUILD="X86;AArch64;NVPTX;AMDGPU" \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_BUILD_LLVM_DYLIB=ON \
-DMLIR_ENABLE_BINDINGS_PYTHON=ON \
-DPython3_EXECUTABLE=`which python`
```

If building on a Mac, perform these additional steps.

```
cp $PREFIX/lib/libtinfo* lib/
cp $PREFIX/lib/libz* lib/
```

Then build and install the project.

```
cmake --build .
cmake --install .
```

Finally, set `LLVM_BUILD_DIR` to point to the current build directory.
Now python-mlir-graphblas should be usable. Verify by running tests.

```
LLVM_BUILD_DIR=. pytest --pyargs mlir_graphblas
```
12 changes: 12 additions & 0 deletions dev-environment.yml
@@ -0,0 +1,12 @@
# To use:
# $ conda env create -f dev-environment.yml
# $ conda activate mlir-graphblas-dev

name: mlir-graphblas-dev
channels:
- conda-forge
- nodefaults # Only from conda-forge for faster solving
dependencies:
- python
- mlir-python-bindings >=16.0
- pytest
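
With this environment file, setup no longer involves building LLVM. A quick sanity check that the packaged bindings are importable (a sketch mirroring the guard added to `mlir_graphblas/__init__.py` below; it is illustrative, not part of this PR) could look like:

```
# Illustrative sanity check: confirm mlir-python-bindings is importable from the
# mlir-graphblas-dev environment before running the test suite.
try:
    import mlir
    import mlir.ir
except ImportError as exc:
    raise SystemExit("mlir-python-bindings is not installed in this environment") from exc

# Creating a Context exercises the native extension, not just the pure-Python package.
with mlir.ir.Context() as ctx:
    print("MLIR bindings loaded:", ctx)
```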
9 changes: 5 additions & 4 deletions mlir_graphblas/__init__.py
@@ -1,7 +1,8 @@
# TODO: remove this once mlir-python-bindings can be properly installed
import os
import sys
sys.path.append(f"{os.environ['LLVM_BUILD_DIR']}/tools/mlir/python_packages/mlir_core")
try:
    import mlir
    import mlir.ir
except ImportError:
    raise ImportError("Missing mlir-python-bindings")

from .operations import *
from .operators import *
7 changes: 3 additions & 4 deletions mlir_graphblas/compiler.py
@@ -2,7 +2,7 @@
from mlir import ir
from mlir import passmanager

from .utils import LIBMLIR_C_RUNNER_UTILS
from .utils import MLIR_C_RUNNER_UTILS


engine_cache = {}
@@ -12,8 +12,7 @@ def compile(module: ir.Module, pipeline=None, *, opt_level=2, shared_libs=None):
    if pipeline is None:
        pipeline = 'builtin.module(sparse-compiler{reassociate-fp-reductions=1 enable-index-optimizations=1})'
    if shared_libs is None:
        shared_libs = [LIBMLIR_C_RUNNER_UTILS]
        shared_libs = [MLIR_C_RUNNER_UTILS]
    # print(module)
    passmanager.PassManager.parse(pipeline).run(module)
    return execution_engine.ExecutionEngine(
        module, opt_level=opt_level, shared_libs=shared_libs)
    return execution_engine.ExecutionEngine(module, opt_level=opt_level, shared_libs=shared_libs)
1 change: 0 additions & 1 deletion mlir_graphblas/implementations.py
@@ -358,7 +358,6 @@ def main(x, y):

def ewise_add(out_type: DType, op: BinaryOp, left: SparseTensorBase, right: SparseTensorBase):
    assert left.ndims == right.ndims
    assert left.dtype == right.dtype

    if left._obj is None:
        if right.dtype == out_type:
10 changes: 9 additions & 1 deletion mlir_graphblas/tests/test_operations.py
@@ -177,7 +177,15 @@ def test_mxm(mm):
    # rowwise @ colwise
    z.clear()
    operations.mxm(z, Semiring.plus_times, x, ycol)
    matrix_compare(z, *expected)
    try:
        matrix_compare(z, *expected)
    except AssertionError:
        # Check for dense return, indicating lack of lex insert fix
        matrix_compare(z,
                       [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],
                       [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
                       [20.9, 0, 0, 0, 0, 16.5, 0, 0, 0, 0, 5.5, 0, 0, 0, 70.4, 0, 0, 0, 0, 0, 0, 0, 0, 13.2, 0])
        pytest.xfail("Waiting for lex insert fix")
    # colwise @ colwise
    z.clear()
    operations.mxm(z, Semiring.plus_times, xcol, ycol)
8 changes: 6 additions & 2 deletions mlir_graphblas/tests/utils.py
@@ -5,6 +5,7 @@

def vector_compare(vec, i, v):
    assert vec.ndims == 1
    assert len(i) == len(v), f"{len(i)} != {len(v)}"
    idx, vals = vec.extract_tuples()
    assert vals.dtype == vec.dtype.np_type
    np_assert_equal(idx, i)
@@ -13,12 +14,15 @@ def vector_compare(vec, i, v):

def matrix_compare(mat, r, c, v):
    assert mat.ndims == 2
    assert len(r) == len(c), f"{len(r)} != {len(c)}"
    assert len(r) == len(v), f"{len(r)} != {len(v)}"
    rows, cols, vals = mat.extract_tuples()
    assert vals.dtype == mat.dtype.np_type
    combo = np.core.records.fromarrays([r, c], names='r,c')
    if mat.is_rowwise():
        sort_order = np.argsort(r)
        sort_order = np.argsort(combo, order=['r', 'c'])
    else:
        sort_order = np.argsort(c)
        sort_order = np.argsort(combo, order=['c', 'r'])
    np_assert_equal(rows, np.array(r)[sort_order])
    np_assert_equal(cols, np.array(c)[sort_order])
    np_assert_allclose(vals, np.array(v)[sort_order])
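
The matrix_compare change above replaces single-key sorting with a lexicographic sort over (row, column) or (column, row), so ties within a row or column no longer scramble the expected tuples. A standalone sketch of the same NumPy idiom, using made-up sample data:

```
import numpy as np

r = [2, 0, 2, 1]
c = [1, 3, 0, 2]
v = [10.0, 20.0, 30.0, 40.0]

# Pack the index arrays into a structured record array so argsort can order by
# row first and break ties by column (or vice versa for column-wise storage).
combo = np.core.records.fromarrays([r, c], names='r,c')
rowwise_order = np.argsort(combo, order=['r', 'c'])
colwise_order = np.argsort(combo, order=['c', 'r'])

print(np.array(r)[rowwise_order])  # [0 1 2 2]
print(np.array(c)[rowwise_order])  # [3 2 0 1]  <- tie on row 2 broken by column
print(np.array(v)[colwise_order])  # [30. 10. 40. 20.]
```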
11 changes: 4 additions & 7 deletions mlir_graphblas/utils.py
@@ -1,5 +1,6 @@
import ctypes
import numpy as np
import ctypes
from ctypes.util import find_library
from enum import Enum
from mlir import ir
from mlir.dialects.sparse_tensor import DimLevelType
@@ -8,13 +9,9 @@
    GrbDimensionMismatch, GrbOutputNotEmpty, GrbIndexOutOfBounds, GrbEmptyObject
)


MLIR_C_RUNNER_UTILS = find_library("mlir_c_runner_utils")
c_lib = ctypes.CDLL(MLIR_C_RUNNER_UTILS)
LLVMPTR = ctypes.POINTER(ctypes.c_char)
# TODO: fix this once a proper package exists
import os
LIBMLIR_C_RUNNER_UTILS = f"{os.environ['LLVM_BUILD_DIR']}/lib/libmlir_c_runner_utils.dylib"

c_lib = ctypes.CDLL(LIBMLIR_C_RUNNER_UTILS)


def get_sparse_output_pointer():
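
The utils.py change above drops the LLVM_BUILD_DIR-based path in favor of `ctypes.util.find_library`, which resolves the runner-utils shared library from the standard search paths. A minimal sketch of that lookup; the error handling here is illustrative and not part of the PR:

```
import ctypes
from ctypes.util import find_library

# Resolve libmlir_c_runner_utils from the standard shared-library search paths
# (no LLVM_BUILD_DIR environment variable required).
path = find_library("mlir_c_runner_utils")
if path is None:  # illustrative guard; the PR assumes the library is installed
    raise ImportError("mlir_c_runner_utils not found; install mlir-python-bindings")

runner_utils = ctypes.CDLL(path)  # load it so JIT-compiled kernels can resolve its symbols
print("Loaded MLIR C runner utils from", path)
```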