Remove implicit `this` capture in lambdas

Implicit capture of `this` through a `[=]` default capture is deprecated
since C++20. List the captured variables explicitly so the capture of
`this` is visible at each call site; behavior is unchanged.

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index e55459d..288d79f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -932,7 +932,9 @@
         kernel(m, n, k, use_thread_local);
       } else {
         eigen_assert(!use_thread_local);
-        device_.enqueueNoNotification([=]() { kernel(m, n, k, use_thread_local); });
+        device_.enqueueNoNotification([this, m, n, k, use_thread_local]() {
+          kernel(m, n, k, use_thread_local);
+        });
       }
     }
 
@@ -980,7 +982,9 @@
       } else {
         while (end - start > 1) {
           Index mid = (start + end) / 2;
-          device_.enqueueNoNotification([=]() { enqueue_packing_helper(mid, end, k, rhs); });
+          device_.enqueueNoNotification([this, mid, end, k, rhs]() {
+            enqueue_packing_helper(mid, end, k, rhs);
+          });
           end = mid;
         }
 
@@ -996,7 +1000,9 @@
                           (k > 0 || std::this_thread::get_id() == created_by_thread_id_);
 
         if (pack_async) {
-          device_.enqueueNoNotification([=]() { enqueue_packing_helper(start, end, k, rhs); });
+          device_.enqueueNoNotification([this, start, end, k, rhs]() {
+            enqueue_packing_helper(start, end, k, rhs);
+          });
         } else {
           enqueue_packing_helper(start, end, k, rhs);
         }
@@ -1277,7 +1283,9 @@
       while (end_block_idx - start_block_idx > 1) {
         Index mid_block_idx = (start_block_idx + end_block_idx) / 2;
         evaluator->m_device.enqueueNoNotification(
-            [this, mid_block_idx, end_block_idx]() { evalAsync<Alignment>(mid_block_idx, end_block_idx); });
+            [this, mid_block_idx, end_block_idx]() {
+              evalAsync<Alignment>(mid_block_idx, end_block_idx);
+            });
         end_block_idx = mid_block_idx;
       }
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index ca1fe6a..c95c8f2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -192,7 +192,7 @@
     // block_count leaves that do actual computations.
     Barrier barrier(static_cast<unsigned int>(block.count));
     std::function<void(Index, Index)> handleRange;
-    handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
+    handleRange = [this, block, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
       while (lastIdx - firstIdx > block.size) {
         // Split into halves and schedule the second half on a different thread.
         const Index midIdx = firstIdx + numext::div_ceil((lastIdx - firstIdx) / 2, block.size) * block.size;