Comparing changes

base repository: NixOS/nixpkgs
base: 026dc16b85f4
head repository: NixOS/nixpkgs
compare: ce00943916f8
  • 1 commit
  • 2 files changed
  • 1 contributor

Commits on May 4, 2018

  1. pytorch: 0.2.0 → 0.3.1 with CUDA and cuDNN (#38530)

    * pytorch-0.3 with optional cuda and cudnn
    
    * pytorch tests reenabled if compiling without cuda
    
    * pytorch: Conditionalize cudnn dependency on cudaSupport
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: Compile with the same GCC version used by CUDA if cudaSupport
    
    Fixes this error:
    
    In file included from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/host_config.h:50:0,
                     from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/cuda_runtime.h:78,
                     from <command-line>:0:
    /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/crt/host_config.h:121:2: error: #error -- unsupported GNU version! gcc versions later than 6 are not supported!
     #error -- unsupported GNU version! gcc versions later than 6 are not supported!
      ^~~~~
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: Build with joined cudatoolkit
    
    Similar to #30058 for TensorFlow.
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: 0.3.0 -> 0.3.1
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: Patch for “refcounted file mapping not supported” failure
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: Skip distributed tests
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    
    * pytorch: Use the stub libcuda.so from cudatoolkit for running tests
    
    Signed-off-by: Anders Kaseorg <andersk@mit.edu>
    andersk authored and FRidh committed May 4, 2018
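For orientation, a minimal usage sketch (not part of the commit): one way to pull the CUDA-enabled variant introduced below into a Python environment. It assumes <nixpkgs> points at a tree containing this change; the withPackages environment itself is illustrative only.

    # build with: nix-build pytorch-cuda-env.nix   (hypothetical file name)
    with import <nixpkgs> {};

    # pytorchWithCuda is the new attribute added to python-packages.nix by
    # this commit; withPackages makes it importable from the python3 env.
    python3.withPackages (ps: [ ps.pytorchWithCuda ])
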
Showing with 67 additions and 11 deletions.
  1. +56 −10 pkgs/development/python-modules/pytorch/default.nix
  2. +11 −1 pkgs/top-level/python-packages.nix
66 changes: 56 additions & 10 deletions pkgs/development/python-modules/pytorch/default.nix
@@ -1,36 +1,82 @@
-{ buildPythonPackage, fetchFromGitHub, lib, numpy, pyyaml, cffi, cmake,
-  git, stdenv }:
+{ buildPythonPackage,
+  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
+  fetchFromGitHub, fetchpatch, lib, numpy, pyyaml, cffi, cmake,
+  git, stdenv, linkFarm, symlinkJoin,
+  utillinux, which }:
 
-buildPythonPackage rec {
-  version = "0.2.0";
+assert cudnn == null || cudatoolkit != null;
+assert !cudaSupport || cudatoolkit != null;
+
+let
+  cudatoolkit_joined = symlinkJoin {
+    name = "${cudatoolkit.name}-unsplit";
+    paths = [ cudatoolkit.out cudatoolkit.lib ];
+  };
+
+  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
+  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
+  # libcuda.so from cudatoolkit for running tests, so that we don’t have
+  # to recompile pytorch on every update to nvidia-x11 or the kernel.
+  cudaStub = linkFarm "cuda-stub" [{
+    name = "libcuda.so.1";
+    path = "${cudatoolkit}/lib/stubs/libcuda.so";
+  }];
+  cudaStubEnv = lib.optionalString cudaSupport
+    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";
+
+in buildPythonPackage rec {
+  version = "0.3.1";
   pname = "pytorch";
   name = "${pname}-${version}";
 
   src = fetchFromGitHub {
     owner = "pytorch";
     repo = "pytorch";
     rev = "v${version}";
-    sha256 = "1s3f46ga1f4lfrcj3lpvvhgkdr1pi8i2hjd9xj9qiz3a9vh2sj4n";
+    fetchSubmodules = true;
+    sha256 = "1k8fr97v5pf7rni5cr2pi21ixc3pdj3h3lkz28njbjbgkndh7mr3";
   };
 
-  checkPhase = ''
-    ${stdenv.shell} test/run_test.sh
+  patches = [
+    (fetchpatch {
+      # make sure stdatomic.h is included when checking for ATOMIC_INT_LOCK_FREE
+      # Fixes this test failure:
+      # RuntimeError: refcounted file mapping not supported on your system at /tmp/nix-build-python3.6-pytorch-0.3.0.drv-0/source/torch/lib/TH/THAllocator.c:525
+      url = "https://github.com/pytorch/pytorch/commit/502aaf39cf4a878f9e4f849e5f409573aa598aa9.patch";
+      stripLen = 3;
+      extraPrefix = "torch/lib/";
+      sha256 = "1miz4lhy3razjwcmhxqa4xmlcmhm65lqyin1czqczj8g16d3f62f";
+    })
+  ];
+
+  postPatch = ''
+    substituteInPlace test/run_test.sh --replace \
+      "INIT_METHOD='file://'\$TEMP_DIR'/shared_init_file' \$PYCMD ./test_distributed.py" \
+      "echo Skipped for Nix package"
   '';
 
+  preConfigure = lib.optionalString cudaSupport ''
+    export CC=${cudatoolkit.cc}/bin/gcc
+  '' + lib.optionalString (cudaSupport && cudnn != null) ''
+    export CUDNN_INCLUDE_DIR=${cudnn}/include
+  '';
+
   buildInputs = [
     cmake
     git
     numpy.blas
-  ];
+    utillinux
+    which
+  ] ++ lib.optionals cudaSupport [cudatoolkit_joined cudnn];
 
   propagatedBuildInputs = [
     cffi
     numpy
     pyyaml
   ];
 
-  preConfigure = ''
-    export NO_CUDA=1
+  checkPhase = ''
+    ${cudaStubEnv}${stdenv.shell} test/run_test.sh
   '';
 
   meta = {
12 changes: 11 additions & 1 deletion pkgs/top-level/python-packages.nix
@@ -5623,7 +5623,17 @@ in {
     };
   };
 
-  pytorch = callPackage ../development/python-modules/pytorch { };
+  pytorch = callPackage ../development/python-modules/pytorch {
+    cudaSupport = pkgs.config.cudaSupport or false;
+  };
+
+  pytorchWithCuda = self.pytorch.override {
+    cudaSupport = true;
+  };
+
+  pytorchWithoutCuda = self.pytorch.override {
+    cudaSupport = false;
+  };
 
   python2-pythondialog = buildPythonPackage rec {
     name = "python2-pythondialog-${version}";
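Since the plain pytorch attribute now defaults to pkgs.config.cudaSupport or false, CUDA can also be enabled globally rather than by selecting pytorchWithCuda. A hedged sketch of such a user config follows; the ~/.config/nixpkgs/config.nix path is the conventional location and an assumption here, not something stated by the commit.

    # ~/.config/nixpkgs/config.nix (assumed location)
    {
      # Read as pkgs.config.cudaSupport by python-packages.nix above, so
      # python3Packages.pytorch itself is built with CUDA support
      # (cudatoolkit and cudnn are filled in automatically by callPackage).
      cudaSupport = true;
    }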