diff --git a/Makefile.in b/Makefile.in index f5fdcd97aa64d..f1b18e8f64b0a 100644 --- a/Makefile.in +++ b/Makefile.in @@ -235,6 +235,8 @@ CFG_LIBSYNTAX_$(1) :=$(call CFG_LIB_NAME_$(1),syntax) CFG_LIBRUSTPKG_$(1) :=$(call CFG_LIB_NAME_$(1),rustpkg) CFG_LIBRUSTDOC_$(1) :=$(call CFG_LIB_NAME_$(1),rustdoc) CFG_LIBRUSTUV_$(1) :=$(call CFG_LIB_NAME_$(1),rustuv) +CFG_LIBGREEN_$(1) :=$(call CFG_LIB_NAME_$(1),green) +CFG_LIBNATIVE_$(1) :=$(call CFG_LIB_NAME_$(1),native) EXTRALIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),extra) STDLIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),std) @@ -243,6 +245,8 @@ LIBSYNTAX_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),syntax) LIBRUSTPKG_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustpkg) LIBRUSTDOC_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustdoc) LIBRUSTUV_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustuv) +LIBGREEN_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),green) +LIBNATIVE_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),native) EXTRALIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),extra) STDLIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),std) LIBRUSTC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustc) @@ -250,12 +254,16 @@ LIBSYNTAX_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),syntax) LIBRUSTPKG_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustpkg) LIBRUSTDOC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustdoc) LIBRUSTUV_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustuv) +LIBGREEN_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),green) +LIBNATIVE_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),native) EXTRALIB_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,extra) STDLIB_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,std) LIBRUSTUV_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,rustuv) LIBSYNTAX_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,syntax) LIBRUSTC_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,rustc) +LIBNATIVE_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,native) +LIBGREEN_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,green) endef @@ -272,9 +280,15 @@ define CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT endef # Same interface as above, but deletes rather than just listing the files. 
+ifdef VERBOSE define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT $(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then echo "warning: removing previous" \'$(2)\' "libraries:" $$MATCHES; rm $$MATCHES ; fi endef +else +define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT + $(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then rm $$MATCHES ; fi endef +endif # We use a different strategy for LIST_ALL_OLD_GLOB_MATCHES_EXCEPT # than in the macros above because it needs the result of running the @@ -319,6 +333,22 @@ LIBRUSTUV_CRATE := $(S)src/librustuv/lib.rs LIBRUSTUV_INPUTS := $(wildcard $(addprefix $(S)src/librustuv/, \ *.rs */*.rs)) +###################################################################### +# Green threading library variables +###################################################################### + +LIBGREEN_CRATE := $(S)src/libgreen/lib.rs +LIBGREEN_INPUTS := $(wildcard $(addprefix $(S)src/libgreen/, \ + *.rs */*.rs)) + +###################################################################### +# Native threading library variables +###################################################################### + +LIBNATIVE_CRATE := $(S)src/libnative/lib.rs +LIBNATIVE_INPUTS := $(wildcard $(addprefix $(S)src/libnative/, \ + *.rs */*.rs)) + ###################################################################### # rustc crate variables ###################################################################### @@ -430,6 +460,16 @@ HLIBRUSTUV_DEFAULT$(1)_H_$(3) = \ TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3) = \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) +HLIBGREEN_DEFAULT$(1)_H_$(3) = \ + $$(HLIB$(1)_H_$(3))/$(CFG_LIBGREEN_$(3)) +TLIBGREEN_DEFAULT$(1)_T_$(2)_H_$(3) = \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) + +HLIBNATIVE_DEFAULT$(1)_H_$(3) = \ + $$(HLIB$(1)_H_$(3))/$(CFG_LIBNATIVE_$(3)) +TLIBNATIVE_DEFAULT$(1)_T_$(2)_H_$(3) = \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)) + # Prerequisites for using the stageN compiler ifeq ($(1),0) HSREQ$(1)_H_$(3) = $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) @@ -441,6 +481,8 @@ HSREQ$(1)_H_$(3) = \ $$(HLIBSYNTAX_DEFAULT$(1)_H_$(3)) \ $$(HLIBRUSTC_DEFAULT$(1)_H_$(3)) \ $$(HLIBRUSTUV_DEFAULT$(1)_H_$(3)) \ + $$(HLIBGREEN_DEFAULT$(1)_H_$(3)) \ + $$(HLIBNATIVE_DEFAULT$(1)_H_$(3)) \ $$(MKFILE_DEPS) endif @@ -455,7 +497,9 @@ SREQ$(1)_T_$(2)_H_$(3) = \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)) # Prerequisites for a working stageN compiler and libraries, for a specific target CSREQ$(1)_T_$(2)_H_$(3) = \ @@ -470,7 +514,9 @@ CSREQ$(1)_T_$(2)_H_$(3) = \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTPKG_$(2)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTDOC_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)) ifeq ($(1),0) # Don't run the stage0 compiler under valgrind - that ship has sailed diff --git a/mk/clean.mk b/mk/clean.mk index 93af331acfbd7..c59298d1ecb63 100644 --- a/mk/clean.mk +++ b/mk/clean.mk @@ -90,6 +90,8 @@ clean$(1)_H_$(2): $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_STDLIB_$(2)) $(Q)rm -f
$$(HLIB$(1)_H_$(2))/$(CFG_EXTRALIB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTUV_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBNATIVE_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBGREEN_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTC_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBSYNTAX_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(STDLIB_GLOB_$(2)) @@ -98,6 +100,10 @@ clean$(1)_H_$(2): $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(EXTRALIB_RGLOB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTUV_GLOB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTUV_RGLOB_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBNATIVE_GLOB_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBNATIVE_RGLOB_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBGREEN_GLOB_$(2)) + $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBGREEN_RGLOB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTC_GLOB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBSYNTAX_GLOB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTPKG_GLOB_$(2)) @@ -124,6 +130,8 @@ clean$(1)_T_$(2)_H_$(3): $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(STDLIB_GLOB_$(2)) @@ -132,6 +140,10 @@ clean$(1)_T_$(2)_H_$(3): $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(EXTRALIB_RGLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTUV_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTUV_RGLOB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBNATIVE_GLOB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBNATIVE_RGLOB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBGREEN_GLOB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBGREEN_RGLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_RGLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBSYNTAX_GLOB_$(2)) diff --git a/mk/dist.mk b/mk/dist.mk index 264d3105839cb..9f0f64f4c6561 100644 --- a/mk/dist.mk +++ b/mk/dist.mk @@ -35,6 +35,9 @@ PKG_FILES := \ libextra \ libstd \ libsyntax \ + librustuv \ + libgreen \ + libnative \ rt \ librustdoc \ rustllvm \ diff --git a/mk/host.mk b/mk/host.mk index 3ddbd7d69c96c..9997384d2502a 100644 --- a/mk/host.mk +++ b/mk/host.mk @@ -25,13 +25,7 @@ define CP_HOST_STAGE_N $$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \ $$(TBIN$(1)_T_$(4)_H_$(3))/rustc$$(X_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)) \ - $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ - $$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ - $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ - $$(HLIBRUSTC_DEFAULT$(2)_H_$(4)) \ - $$(HLIBSYNTAX_DEFAULT$(2)_H_$(4)) \ | $$(HBIN$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(Q)cp $$< $$@ @@ -39,10 +33,6 @@ $$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)): \ $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBRUSTC_$(4)) \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBSYNTAX_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ - $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ - $$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ - $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) @@ -55,10 +45,11 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBSYNTAX_$(4)): \ $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBSYNTAX_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ 
$$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ + $$(HLIBGREEN_DEFAULT$(2)_H_$(4)) \ + $$(HLIBNATIVE_DEFAULT$(2)_H_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(4)),$$(notdir $$@)) @@ -76,7 +67,6 @@ $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)): \ $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_STDLIB_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(STDLIB_GLOB_$(4)),$$(notdir $$@)) @@ -98,8 +88,7 @@ $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_EXTRALIB_$(4)): \ $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_EXTRALIB_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ + $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@)) @@ -115,7 +104,6 @@ $$(HLIB$(2)_H_$(4))/$(CFG_EXTRALIB_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTUV_$(4)): \ $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBRUSTUV_$(4)) \ $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \ - $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) @@ -128,6 +116,36 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTUV_$(4)): \ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_RGLOB_$(4)),$$(notdir $$@)) +$$(HLIB$(2)_H_$(4))/$(CFG_LIBGREEN_$(4)): \ + $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBGREEN_$(4)) \ + $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \ + | $$(HLIB$(2)_H_$(4))/ + @$$(call E, cp: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(4)),$$(notdir $$@)) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(4)),$$(notdir $$@)) + $$(Q)cp $$< $$@ + $$(Q)cp -R $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_GLOB_$(4)) \ + $$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_RGLOB_$(4))) \ + $$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_DSYM_GLOB_$(4))) \ + $$(HLIB$(2)_H_$(4)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(4)),$$(notdir $$@)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(4)),$$(notdir $$@)) + +$$(HLIB$(2)_H_$(4))/$(CFG_LIBNATIVE_$(4)): \ + $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBNATIVE_$(4)) \ + $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \ + | $$(HLIB$(2)_H_$(4))/ + @$$(call E, cp: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(4)),$$(notdir $$@)) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(4)),$$(notdir $$@)) + $$(Q)cp $$< $$@ + $$(Q)cp -R $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_GLOB_$(4)) \ + $$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_RGLOB_$(4))) \ + $$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_DSYM_GLOB_$(4))) \ + $$(HLIB$(2)_H_$(4)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(4)),$$(notdir $$@)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(4)),$$(notdir $$@)) + $$(HBIN$(2)_H_$(4))/: mkdir -p $$@ diff --git a/mk/install.mk b/mk/install.mk index 37eaa6f52167a..f81367010ed71 100644 --- a/mk/install.mk +++ b/mk/install.mk @@ -94,6 +94,10 @@ install-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ$$(ISTAG 
$$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_RGLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_RGLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_RGLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_RGLOB_$(1))) $$(Q)$$(call INSTALL_LIB,libmorestack.a) endef @@ -109,6 +113,10 @@ install-target-$(1)-host-$(2): $$(CSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_RGLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_RGLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_RGLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_RGLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTC_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBSYNTAX_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTPKG_GLOB_$(1))) @@ -149,6 +157,7 @@ install-host: $(CSREQ$(ISTAGE)_T_$(CFG_BUILD_)_H_$(CFG_BUILD_)) $(Q)$(call INSTALL_LIB,$(STDLIB_GLOB_$(CFG_BUILD))) $(Q)$(call INSTALL_LIB,$(EXTRALIB_GLOB_$(CFG_BUILD))) $(Q)$(call INSTALL_LIB,$(LIBRUSTUV_GLOB_$(CFG_BUILD))) + $(Q)$(call INSTALL_LIB,$(LIBGREEN_GLOB_$(CFG_BUILD))) $(Q)$(call INSTALL_LIB,$(LIBRUSTC_GLOB_$(CFG_BUILD))) $(Q)$(call INSTALL_LIB,$(LIBSYNTAX_GLOB_$(CFG_BUILD))) $(Q)$(call INSTALL_LIB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD))) @@ -174,6 +183,10 @@ uninstall: $(call HOST_LIB_FROM_HL_GLOB,$(EXTRALIB_RGLOB_$(CFG_BUILD))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTUV_GLOB_$(CFG_BUILD))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTUV_RGLOB_$(CFG_BUILD))) \ + $(call HOST_LIB_FROM_HL_GLOB,$(LIBGREEN_GLOB_$(CFG_BUILD))) \ + $(call HOST_LIB_FROM_HL_GLOB,$(LIBGREEN_RGLOB_$(CFG_BUILD))) \ + $(call HOST_LIB_FROM_HL_GLOB,$(LIBNATIVE_GLOB_$(CFG_BUILD))) \ + $(call HOST_LIB_FROM_HL_GLOB,$(LIBNATIVE_RGLOB_$(CFG_BUILD))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTC_GLOB_$(CFG_BUILD))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBSYNTAX_GLOB_$(CFG_BUILD))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD))) \ @@ -237,6 +250,7 @@ install-runtime-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(STDLIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(EXTRALIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(LIBRUSTUV_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) + $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(LIBGREEN_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) endef define INSTALL_RUNTIME_TARGET_CLEANUP_N @@ -245,6 +259,7 @@ install-runtime-target-$(1)-cleanup: $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(STDLIB_GLOB_$(1))) $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(EXTRALIB_GLOB_$(1))) $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(LIBRUSTUV_GLOB_$(1))) + $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(LIBGREEN_GLOB_$(1))) endef $(eval $(call INSTALL_RUNTIME_TARGET_N,arm-linux-androideabi,$(CFG_BUILD))) diff --git a/mk/target.mk b/mk/target.mk index 3c7ffd83d9681..64bc6ab3afdae 100644 --- a/mk/target.mk +++ b/mk/target.mk @@ -94,12 +94,37 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)): \ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(2)),$$(notdir $$@)) $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_RGLOB_$(2)),$$(notdir $$@)) +$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)): \ + $$(LIBGREEN_CRATE) $$(LIBGREEN_INPUTS) \ + 
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \ + $$(TSREQ$(1)_T_$(2)_H_$(3)) \ + | $$(TLIB$(1)_T_$(2)_H_$(3))/ + @$$(call E, compile_and_link: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(2)),$$(notdir $$@)) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(2)),$$(notdir $$@)) + $$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) \ + --out-dir $$(@D) $$< && touch $$@ + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(2)),$$(notdir $$@)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(2)),$$(notdir $$@)) + +$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)): \ + $$(LIBNATIVE_CRATE) $$(LIBNATIVE_INPUTS) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \ + $$(TSREQ$(1)_T_$(2)_H_$(3)) \ + | $$(TLIB$(1)_T_$(2)_H_$(3))/ + @$$(call E, compile_and_link: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(2)),$$(notdir $$@)) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(2)),$$(notdir $$@)) + $$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) \ + --out-dir $$(@D) $$< && touch $$@ + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(2)),$$(notdir $$@)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(2)),$$(notdir $$@)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(3)): \ $$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ $$(TSTDLIB_DEFAULT$(1)_T_$(2)_H_$(3)) \ $$(TEXTRALIB_DEFAULT$(1)_T_$(2)_H_$(3)) \ - $$(TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3)) \ | $$(TLIB$(1)_T_$(2)_H_$(3))/ @$$(call E, compile_and_link: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(2)),$$(notdir $$@)) @@ -135,16 +160,13 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)): \ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_GLOB_$(2)),$$(notdir $$@)) $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_RGLOB_$(2)),$$(notdir $$@)) -# NOTE: after the next snapshot remove these '-L' flags $$(TBIN$(1)_T_$(2)_H_$(3))/rustc$$(X_$(3)): \ $$(DRIVER_CRATE) \ - $$(TSREQ$(1)_T_$(2)_H_$(3)) \ + $$(SREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)) \ | $$(TBIN$(1)_T_$(2)_H_$(3))/ @$$(call E, compile_and_link: $$@) - $$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$< \ - -L $$(UV_SUPPORT_DIR_$(2)) \ - -L $$(dir $$(LIBUV_LIB_$(2))) + $$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$< ifdef CFG_ENABLE_PAX_FLAGS @$$(call E, apply PaX flags: $$@) @"$(CFG_PAXCTL)" -cm "$$@" diff --git a/mk/tests.mk b/mk/tests.mk index 1a56c008ccb70..9fd9d9617c7d3 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -14,7 +14,7 @@ ###################################################################### # The names of crates that must be tested -TEST_TARGET_CRATES = std extra rustuv +TEST_TARGET_CRATES = std extra rustuv green native TEST_DOC_CRATES = std extra TEST_HOST_CRATES = rustpkg rustc rustdoc syntax TEST_CRATES = $(TEST_TARGET_CRATES) $(TEST_HOST_CRATES) @@ -162,6 +162,8 @@ $(info check: android device test dir $(CFG_ADB_TEST_DIR) ready \ $(CFG_ADB_TEST_DIR)) \ $(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD))/$(LIBRUSTUV_GLOB_arm-linux-androideabi) \ $(CFG_ADB_TEST_DIR)) \ + $(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD))/$(LIBGREEN_GLOB_arm-linux-androideabi) \ + $(CFG_ADB_TEST_DIR)) \ ) else CFG_ADB_TEST_DIR= @@ -187,7 +189,7 @@ check-test: cleantestlibs cleantmptestlogs all check-stage2-rfail check-lite: cleantestlibs 
cleantmptestlogs \ check-stage2-std check-stage2-extra check-stage2-rpass \ - check-stage2-rustuv \ + check-stage2-rustuv check-stage2-native check-stage2-green \ check-stage2-rustpkg \ check-stage2-rfail check-stage2-cfail check-stage2-rmake $(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log @@ -339,19 +341,20 @@ define TEST_RUNNER ifeq ($(NO_REBUILD),) STDTESTDEP_$(1)_$(2)_$(3) = $$(SREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_EXTRALIB_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBRUSTUV_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBRUSTUV_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBGREEN_$(2)) else STDTESTDEP_$(1)_$(2)_$(3) = endif $(3)/stage$(1)/test/stdtest-$(2)$$(X_$(2)): \ - $$(STDLIB_CRATE) $$(STDLIB_INPUTS) \ + $$(STDLIB_CRATE) $$(STDLIB_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test $(3)/stage$(1)/test/extratest-$(2)$$(X_$(2)): \ - $$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \ + $$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test @@ -364,6 +367,18 @@ $(3)/stage$(1)/test/rustuvtest-$(2)$$(X_$(2)): \ -L $$(UV_SUPPORT_DIR_$(2)) \ -L $$(dir $$(LIBUV_LIB_$(2))) +$(3)/stage$(1)/test/nativetest-$(2)$$(X_$(2)): \ + $$(LIBNATIVE_CRATE) $$(LIBNATIVE_INPUTS) \ + $$(STDTESTDEP_$(1)_$(2)_$(3)) + @$$(call E, compile_and_link: $$@) + $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test + +$(3)/stage$(1)/test/greentest-$(2)$$(X_$(2)): \ + $$(LIBGREEN_CRATE) $$(LIBGREEN_INPUTS) \ + $$(STDTESTDEP_$(1)_$(2)_$(3)) + @$$(call E, compile_and_link: $$@) + $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test + $(3)/stage$(1)/test/syntaxtest-$(2)$$(X_$(2)): \ $$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) @@ -375,7 +390,7 @@ $(3)/stage$(1)/test/rustctest-$(2)$$(X_$(2)): \ $$(COMPILER_CRATE) $$(COMPILER_INPUTS) \ $$(SREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_RUSTLLVM_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBSYNTAX_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBSYNTAX_$(2)) @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test \ -L "$$(LLVM_LIBDIR_$(2))" @@ -416,10 +431,10 @@ check-stage$(1)-T-$(2)-H-$(3)-$(4)-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4 $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \ $(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)) @$$(call E, run: $$<) - $$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS) \ - --logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \ - $$(call CRATE_TEST_EXTRA_ARGS,$(1),$(2),$(3),$(4)) \ - && touch $$@ + $$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS) \ + --logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \ + $$(call CRATE_TEST_EXTRA_ARGS,$(1),$(2),$(3),$(4)) \ + && touch $$@ endef define DEF_TEST_CRATE_RULES_arm-linux-androideabi diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 0fb75b7c8e0c0..ae7d1a30a841d 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -13,10 +13,11 @@ #[allow(non_camel_case_types)]; #[deny(warnings)]; +#[cfg(stage0)] extern mod green; extern mod extra; use std::os; -use std::rt; +use std::io; use std::io::fs; use extra::getopts; @@ -234,7 +235,7 @@ pub fn run_tests(config: &config) { // sadly osx needs some file descriptor limits raised for running tests in // parallel (especially when we have lots and lots of child processes). 
// For context, see #8904 - rt::test::prepare_for_lots_of_tests(); + io::test::raise_fd_limit(); let res = test::run_tests_console(&opts, tests); if !res { fail!("Some tests failed"); } } diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs index 4176c6698c996..b22c17fb4387e 100644 --- a/src/compiletest/runtest.rs +++ b/src/compiletest/runtest.rs @@ -757,8 +757,8 @@ fn make_lib_name(config: &config, auxfile: &Path, testfile: &Path) -> Path { fn make_exe_name(config: &config, testfile: &Path) -> Path { let mut f = output_base_name(config, testfile); - if !os::EXE_SUFFIX.is_empty() { - match f.filename().map(|s| s + os::EXE_SUFFIX.as_bytes()) { + if !os::consts::EXE_SUFFIX.is_empty() { + match f.filename().map(|s| s + os::consts::EXE_SUFFIX.as_bytes()) { Some(v) => f.set_filename(v), None => () } diff --git a/src/driver/driver.rs b/src/driver/driver.rs index 9402578d5525a..8e5b6356a0b83 100644 --- a/src/driver/driver.rs +++ b/src/driver/driver.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#[cfg(stage0)] extern mod green; + #[cfg(rustpkg)] extern mod this = "rustpkg"; diff --git a/src/etc/licenseck.py b/src/etc/licenseck.py index 78d0973fdfe2e..073322b0815a2 100644 --- a/src/etc/licenseck.py +++ b/src/etc/licenseck.py @@ -76,9 +76,9 @@ "rt/isaac/randport.cpp", # public domain "rt/isaac/rand.h", # public domain "rt/isaac/standard.h", # public domain - "libstd/rt/mpsc_queue.rs", # BSD - "libstd/rt/spsc_queue.rs", # BSD - "libstd/rt/mpmc_bounded_queue.rs", # BSD + "libstd/sync/mpsc_queue.rs", # BSD + "libstd/sync/spsc_queue.rs", # BSD + "libstd/sync/mpmc_bounded_queue.rs", # BSD ] def check_license(name, contents): diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs index c1763c37bb5a1..a411c4e9185b1 100644 --- a/src/libextra/arc.rs +++ b/src/libextra/arc.rs @@ -45,7 +45,7 @@ use sync; use sync::{Mutex, RWLock}; use std::cast; -use std::unstable::sync::UnsafeArc; +use std::sync::arc::UnsafeArc; use std::task; use std::borrow; @@ -127,20 +127,6 @@ impl Arc { pub fn get<'a>(&'a self) -> &'a T { unsafe { &*self.x.get_immut() } } - - /** - * Retrieve the data back out of the Arc. This function blocks until the - * reference given to it is the last existing one, and then unwrap the data - * instead of destroying it. - * - * If multiple tasks call unwrap, all but the first will fail. Do not call - * unwrap from a task that holds another reference to the same Arc; it is - * guaranteed to deadlock. - */ - pub fn unwrap(self) -> T { - let Arc { x: x } = self; - x.unwrap() - } } impl Clone for Arc { @@ -247,22 +233,6 @@ impl MutexArc { cond: cond }) }) } - - /** - * Retrieves the data, blocking until all other references are dropped, - * exactly as arc::unwrap. - * - * Will additionally fail if another task has failed while accessing the arc. - */ - pub fn unwrap(self) -> T { - let MutexArc { x: x } = self; - let inner = x.unwrap(); - let MutexArcInner { failed: failed, data: data, .. } = inner; - if failed { - fail!("Can't unwrap poisoned MutexArc - another task failed inside!"); - } - data - } } impl MutexArc { @@ -503,23 +473,6 @@ impl RWArc { } } } - - /** - * Retrieves the data, blocking until all other references are dropped, - * exactly as arc::unwrap. - * - * Will additionally fail if another task has failed while accessing the arc - * in write mode. - */ - pub fn unwrap(self) -> T { - let RWArc { x: x, .. } = self; - let inner = x.unwrap(); - let RWArcInner { failed: failed, data: data, .. 
} = inner; - if failed { - fail!("Can't unwrap poisoned RWArc - another task failed inside!") - } - data - } } // Borrowck rightly complains about immutably aliasing the rwlock in order to @@ -689,22 +642,6 @@ mod tests { }) } - #[test] #[should_fail] - pub fn test_mutex_arc_unwrap_poison() { - let arc = MutexArc::new(1); - let arc2 = ~(&arc).clone(); - let (p, c) = Chan::new(); - do task::spawn { - arc2.access(|one| { - c.send(()); - assert!(*one == 2); - }) - } - let _ = p.recv(); - let one = arc.unwrap(); - assert!(one == 1); - } - #[test] fn test_unsafe_mutex_arc_nested() { unsafe { diff --git a/src/libextra/comm.rs b/src/libextra/comm.rs index c3b17fe996405..52b5bedb7ea49 100644 --- a/src/libextra/comm.rs +++ b/src/libextra/comm.rs @@ -96,7 +96,6 @@ pub fn rendezvous() -> (SyncPort, SyncChan) { #[cfg(test)] mod test { use comm::{DuplexStream, rendezvous}; - use std::rt::test::run_in_uv_task; #[test] @@ -124,13 +123,11 @@ mod test { #[test] fn recv_a_lot() { // Rendezvous streams should be able to handle any number of messages being sent - do run_in_uv_task { - let (port, chan) = rendezvous(); - do spawn { - 1000000.times(|| { chan.send(()) }) - } - 1000000.times(|| { port.recv() }) + let (port, chan) = rendezvous(); + do spawn { + 1000000.times(|| { chan.send(()) }) } + 1000000.times(|| { port.recv() }) } #[test] diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs index 57a7f38696d6c..f43329076c8b8 100644 --- a/src/libextra/sync.rs +++ b/src/libextra/sync.rs @@ -19,8 +19,9 @@ use std::borrow; -use std::unstable::sync::{Exclusive, UnsafeArc}; -use std::unstable::atomics; +use std::unstable::sync::Exclusive; +use std::sync::arc::UnsafeArc; +use std::sync::atomics; use std::unstable::finally::Finally; use std::util; use std::util::NonCopyable; @@ -78,7 +79,7 @@ impl WaitQueue { fn wait_end(&self) -> WaitEnd { let (wait_end, signal_end) = Chan::new(); - self.tail.send_deferred(signal_end); + assert!(self.tail.try_send_deferred(signal_end)); wait_end } } @@ -760,23 +761,21 @@ mod tests { fn test_sem_runtime_friendly_blocking() { // Force the runtime to schedule two threads on the same sched_loop. // When one blocks, it should schedule the other one. - do task::spawn_sched(task::SingleThreaded) { - let s = Semaphore::new(1); - let s2 = s.clone(); - let (p, c) = Chan::new(); - let mut child_data = Some((s2, c)); - s.access(|| { - let (s2, c) = child_data.take_unwrap(); - do task::spawn { - c.send(()); - s2.access(|| { }); - c.send(()); - } - let _ = p.recv(); // wait for child to come alive - 5.times(|| { task::deschedule(); }); // let the child contend - }); - let _ = p.recv(); // wait for child to be done - } + let s = Semaphore::new(1); + let s2 = s.clone(); + let (p, c) = Chan::new(); + let mut child_data = Some((s2, c)); + s.access(|| { + let (s2, c) = child_data.take_unwrap(); + do task::spawn { + c.send(()); + s2.access(|| { }); + c.send(()); + } + let _ = p.recv(); // wait for child to come alive + 5.times(|| { task::deschedule(); }); // let the child contend + }); + let _ = p.recv(); // wait for child to be done } /************************************************************************ * Mutex tests diff --git a/src/libextra/task_pool.rs b/src/libextra/task_pool.rs index f0c9833adf804..ba38f87628731 100644 --- a/src/libextra/task_pool.rs +++ b/src/libextra/task_pool.rs @@ -14,12 +14,9 @@ /// parallelism. 
-use std::task::SchedMode; use std::task; use std::vec; -#[cfg(test)] use std::task::SingleThreaded; - enum Msg<T> { Execute(proc(&T)), Quit } @@ -46,7 +43,6 @@ impl<T> TaskPool<T> { /// returns a function which, given the index of the task, should return /// local data to be kept around in that task. pub fn new(n_tasks: uint, - opt_sched_mode: Option<SchedMode>, init_fn_factory: || -> proc(uint) -> T) -> TaskPool<T> { assert!(n_tasks >= 1); @@ -65,18 +61,8 @@ impl<T> TaskPool<T> { } }; - // Start the task. - match opt_sched_mode { - None => { - // Run on this scheduler. - task::spawn(task_body); - } - Some(sched_mode) => { - let mut task = task::task(); - task.sched_mode(sched_mode); - task.spawn(task_body); - } - } + // Run on this scheduler. + task::spawn(task_body); chan }); @@ -99,7 +85,7 @@ fn test_task_pool() { let g: proc(uint) -> uint = proc(i) i; g }; - let mut pool = TaskPool::new(4, Some(SingleThreaded), f); + let mut pool = TaskPool::new(4, f); 8.times(|| { pool.execute(proc(i) println!("Hello from thread {}!", *i)); }) diff --git a/src/libstd/rt/basic.rs b/src/libgreen/basic.rs similarity index 79% rename from src/libstd/rt/basic.rs rename to src/libgreen/basic.rs index 3589582357c56..0574792c18da8 100644 --- a/src/libstd/rt/basic.rs +++ b/src/libgreen/basic.rs @@ -11,15 +11,15 @@ //! This is a basic event loop implementation not meant for any "real purposes" //! other than testing the scheduler and proving that it's possible to have a //! pluggable event loop. +//! +//! This implementation is also used as the fallback implementation of an event +//! loop if no other one is provided (and M:N scheduling is desired). -use prelude::*; - -use cast; -use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback, - Callback}; -use unstable::sync::Exclusive; -use io::native; -use util; +use std::cast; +use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback, + Callback}; +use std::unstable::sync::Exclusive; +use std::util; /// This is the only exported function from this module.
pub fn event_loop() -> ~EventLoop { @@ -32,7 +32,6 @@ struct BasicLoop { remotes: ~[(uint, ~Callback)], next_remote: uint, messages: Exclusive<~[Message]>, - io: ~IoFactory, } enum Message { RunRemote(uint), RemoveRemote(uint) } @@ -45,7 +44,6 @@ impl BasicLoop { next_remote: 0, remotes: ~[], messages: Exclusive::new(~[]), - io: ~native::IoFactory as ~IoFactory, } } @@ -159,10 +157,7 @@ impl EventLoop for BasicLoop { ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback } - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { - let factory: &mut IoFactory = self.io; - Some(factory) - } + fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { None } } struct BasicRemote { @@ -228,3 +223,61 @@ impl Drop for BasicPausable { } } } + +#[cfg(test)] +mod test { + use std::task::TaskOpts; + + use basic; + use PoolConfig; + use SchedPool; + + fn pool() -> SchedPool { + SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: Some(basic::event_loop), + }) + } + + fn run(f: proc()) { + let mut pool = pool(); + pool.spawn(TaskOpts::new(), f); + pool.shutdown(); + } + + #[test] + fn smoke() { + do run {} + } + + #[test] + fn some_channels() { + do run { + let (p, c) = Chan::new(); + do spawn { + c.send(()); + } + p.recv(); + } + } + + #[test] + fn multi_thread() { + let mut pool = SchedPool::new(PoolConfig { + threads: 2, + event_loop_factory: Some(basic::event_loop), + }); + + for _ in range(0, 20) { + do pool.spawn(TaskOpts::new()) { + let (p, c) = Chan::new(); + do spawn { + c.send(()); + } + p.recv(); + } + } + + pool.shutdown(); + } +} diff --git a/src/libstd/rt/context.rs b/src/libgreen/context.rs similarity index 53% rename from src/libstd/rt/context.rs rename to src/libgreen/context.rs index 31cf069688168..8530e3e837ea8 100644 --- a/src/libstd/rt/context.rs +++ b/src/libgreen/context.rs @@ -8,14 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::*; -use super::stack::StackSegment; -use libc::c_void; -use uint; -use cast::{transmute, transmute_mut_unsafe, - transmute_region, transmute_mut_region}; +use std::libc::c_void; +use std::uint; +use std::cast::{transmute, transmute_mut_unsafe, + transmute_region, transmute_mut_region}; +use std::unstable::stack; -pub static RED_ZONE: uint = 20 * 1024; +use stack::StackSegment; // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing // SSE regs. It would be marginally better not to do this. In C++ we @@ -43,32 +42,47 @@ impl Context { /// Create a new context that will resume execution by running proc() pub fn new(start: proc(), stack: &mut StackSegment) -> Context { - // FIXME #7767: Putting main into a ~ so it's a thin pointer and can - // be passed to the spawn function. Another unfortunate - // allocation - let start = ~start; - // The C-ABI function that is the task entry point + // + // Note that this function is a little sketchy. We're taking a + // procedure, transmuting it to a stack-closure, and then calling the + // closure. This leverages the fact that the representation of these two + // types is the same. + // + // The reason that we're doing this is that this procedure is expected + // to never return. The codegen which frees the environment of the + // procedure occurs *after* the procedure has completed, and this means + // that we'll never actually free the procedure.
+ // + // To solve this, we use this transmute (to not trigger the procedure + // deallocation here), and then store a copy of the procedure in the + // `Context` structure returned. When the `Context` is deallocated, then + // the entire procedure box will be deallocated as well. extern fn task_start_wrapper(f: &proc()) { - // XXX(pcwalton): This may be sketchy. unsafe { let f: &|| = transmute(f); (*f)() } } - let fp: *c_void = task_start_wrapper as *c_void; - let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) }; let sp: *uint = stack.end(); let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) }; // Save and then immediately load the current context, // which we will then modify to call the given function when restored let mut regs = new_regs(); unsafe { - rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs)); + rust_swap_registers(transmute_mut_region(&mut *regs), + transmute_region(&*regs)); }; - initialize_call_frame(&mut *regs, fp, argp, sp); + // FIXME #7767: Putting main into a ~ so it's a thin pointer and can + // be passed to the spawn function. Another unfortunate + // allocation + let start = ~start; + initialize_call_frame(&mut *regs, + task_start_wrapper as *c_void, + unsafe { transmute(&*start) }, + sp); // Scheduler tasks don't have a stack in the "we allocated it" sense, // but rather they run on pthreads stacks. We have complete control over @@ -113,17 +127,18 @@ impl Context { // invalid for the current task. Lucky for us `rust_swap_registers` // is a C function so we don't have to worry about that! match in_context.stack_bounds { - Some((lo, hi)) => record_stack_bounds(lo, hi), + Some((lo, hi)) => stack::record_stack_bounds(lo, hi), // If we're going back to one of the original contexts or // something that's possibly not a "normal task", then reset // the stack limit to 0 to make morestack never fail - None => record_stack_bounds(0, uint::max_value), + None => stack::record_stack_bounds(0, uint::max_value), } rust_swap_registers(out_regs, in_regs) } } } +#[link(name = "rustrt", kind = "static")] extern { fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers); } @@ -282,182 +297,6 @@ fn align_down(sp: *mut uint) -> *mut uint { // ptr::mut_offset is positive ints only #[inline] pub fn mut_offset(ptr: *mut T, count: int) -> *mut T { - use mem::size_of; + use std::mem::size_of; (ptr as int + count * (size_of::() as int)) as *mut T } - -#[inline(always)] -pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // When the old runtime had segmented stacks, it used a calculation that was - // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic - // symbol resolution, llvm function calls, etc. In theory this red zone - // value is 0, but it matters far less when we have gigantic stacks because - // we don't need to be so exact about our stack budget. The "fudge factor" - // was because LLVM doesn't emit a stack check for functions < 256 bytes in - // size. Again though, we have giant stacks, so we round all these - // calculations up to the nice round number of 20k. 
- record_sp_limit(stack_lo + RED_ZONE); - - return target_record_stack_bounds(stack_lo, stack_hi); - - #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)] - unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {} - #[cfg(windows, target_arch = "x86_64")] #[inline(always)] - unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // Windows compiles C functions which may check the stack bounds. This - // means that if we want to perform valid FFI on windows, then we need - // to ensure that the stack bounds are what they truly are for this - // task. More info can be found at: - // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839 - // - // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom) - asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile"); - asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile"); - } -} - -/// Records the current limit of the stack as specified by `end`. -/// -/// This is stored in an OS-dependent location, likely inside of the thread -/// local storage. The location that the limit is stored is a pre-ordained -/// location because it's where LLVM has emitted code to check. -/// -/// Note that this cannot be called under normal circumstances. This function is -/// changing the stack limit, so upon returning any further function calls will -/// possibly be triggering the morestack logic if you're not careful. -/// -/// Also note that this and all of the inside functions are all flagged as -/// "inline(always)" because they're messing around with the stack limits. This -/// would be unfortunate for the functions themselves to trigger a morestack -/// invocation (if they were an actual function call). -#[inline(always)] -pub unsafe fn record_sp_limit(limit: uint) { - return target_record_sp_limit(limit); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $$0x60+90*8, %rsi - movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot", but double the size - // because this is 64 bit instead of 32 bit - asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile") - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $$0x48+90*4, %eax - movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile") - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot" - asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile") 
- } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - return record_sp_limit(limit as *c_void); - extern { - fn record_sp_limit(limit: *c_void); - } - } -} - -/// The counterpart of the function above, this function will fetch the current -/// stack limit stored in TLS. -/// -/// Note that all of these functions are meant to be exact counterparts of their -/// brethren above, except that the operands are reversed. -/// -/// As with the setter, this function does not have a __morestack header and can -/// therefore be called in a "we're out of stack" situation. -#[inline(always)] -// currently only called by `rust_stack_exhausted`, which doesn't -// exist in a test build. -#[cfg(not(test))] -pub unsafe fn get_sp_limit() -> uint { - return target_get_sp_limit(); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq $$0x60+90*8, %rsi - movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl $$0x48+90*4, %eax - movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - return get_sp_limit() as uint; - extern { - fn get_sp_limit() -> *c_void; - } - } -} diff --git a/src/libgreen/coroutine.rs b/src/libgreen/coroutine.rs new file mode 100644 index 0000000000000..7bc5d0accfe3b --- /dev/null +++ b/src/libgreen/coroutine.rs @@ -0,0 +1,62 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Coroutines represent nothing more than a context and a stack +// segment.
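Before the body of coroutine.rs, a quick orientation on how the pieces above fit together: a green task owns a stack segment checked out of a `StackPool`, and a `Context` records the register state needed to jump onto (and back off of) that stack. The sketch below is illustrative only, in the 0.9-era Rust of this PR; `StackPool::new`, `take_segment`, `give_segment`, `Context::new`, and `Context::empty` all appear in this diff, while the exact signature of `Context::swap` is an assumption based on the surrounding runtime code and is not shown in this excerpt.

```rust
// Illustrative sketch (0.9-era Rust), not part of the diff.
let start: proc() = proc() { /* task body runs on the new stack */ };

let mut stack_pool = StackPool::new();

// Check a stack segment out of the pool; the size here is an arbitrary
// example (Coroutine::new below defaults to std::rt::env::min_stack()).
let mut stack = stack_pool.take_segment(2 * 1024 * 1024);

// Arrange for `start` to run when this context is first swapped in.
let task_ctx = Context::new(start, &mut stack);
let mut scheduler_ctx = Context::empty();

// rust_swap_registers saves the current registers into `scheduler_ctx`
// and loads `task_ctx`, landing on the new stack. (Assumed signature.)
Context::swap(&mut scheduler_ctx, &task_ctx);

// Once the task has finished with the stack, recycle it for reuse.
stack_pool.give_segment(stack);
```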
+ +use std::rt::env; + +use context::Context; +use stack::{StackPool, StackSegment}; + +/// A coroutine is nothing more than a (register context, stack) pair. +pub struct Coroutine { + /// The segment of stack on which the task is currently running or + /// if the task is blocked, on which the task will resume + /// execution. + /// + /// Servo needs this to be public in order to tell SpiderMonkey + /// about the stack bounds. + current_stack_segment: StackSegment, + + /// Always valid if the task is alive and not running. + saved_context: Context +} + +impl Coroutine { + pub fn new(stack_pool: &mut StackPool, + stack_size: Option<uint>, + start: proc()) + -> Coroutine { + let stack_size = match stack_size { + Some(size) => size, + None => env::min_stack() + }; + let mut stack = stack_pool.take_segment(stack_size); + let initial_context = Context::new(start, &mut stack); + Coroutine { + current_stack_segment: stack, + saved_context: initial_context + } + } + + pub fn empty() -> Coroutine { + Coroutine { + current_stack_segment: StackSegment::new(0), + saved_context: Context::empty() + } + } + + /// Destroy the coroutine and try to reuse its stack segment. + pub fn recycle(self, stack_pool: &mut StackPool) { + let Coroutine { current_stack_segment, .. } = self; + stack_pool.give_segment(current_stack_segment); + } +} diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs new file mode 100644 index 0000000000000..3a2e8a2b36cf5 --- /dev/null +++ b/src/libgreen/lib.rs @@ -0,0 +1,320 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The "green scheduling" library +//! +//! This library provides M:N threading for rust programs. Internally this has +//! the implementation of a green scheduler along with context switching and a +//! stack-allocation strategy. +//! +//! This can be optionally linked in to rust programs in order to provide M:N +//! functionality inside of 1:1 programs. + +#[pkgid = "green#0.9-pre"]; +#[crate_id = "green#0.9-pre"]; +#[license = "MIT/ASL2"]; +#[crate_type = "rlib"]; +#[crate_type = "dylib"]; + +// NB this does *not* include globs, please keep it that way. +#[feature(macro_rules)]; + +use std::os; +use std::rt::crate_map; +use std::rt::local::Local; +use std::rt::rtio; +use std::rt::task::Task; +use std::rt::thread::Thread; +use std::rt; +use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; +use std::sync::deque; +use std::task::TaskOpts; +use std::util; +use std::vec; + +use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor}; +use sleeper_list::SleeperList; +use stack::StackPool; +use task::GreenTask; + +mod macros; +mod simple; + +pub mod basic; +pub mod context; +pub mod coroutine; +pub mod sched; +pub mod sleeper_list; +pub mod stack; +pub mod task; + +#[lang = "start"] +#[cfg(not(test))] +pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { + use std::cast; + do start(argc, argv) { + let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; + main(); + } +} + +/// Set up a default runtime configuration, given compiler-supplied arguments. +/// +/// This function will block until the entire pool of M:N schedulers have +/// exited. This function also requires a local task to be available.
+/// +/// # Arguments +/// +/// * `argc` & `argv` - The argument vector. On Unix this information is used +/// by os::args. +/// * `main` - The initial procedure to run inside of the M:N scheduling pool. +/// Once this procedure exits, the scheduling pool will begin to shut +/// down. The entire pool (and this function) will only return once +/// all child tasks have finished executing. +/// +/// # Return value +/// +/// The return value is used as the process return code. 0 on success, 101 on +/// error. +pub fn start(argc: int, argv: **u8, main: proc()) -> int { + rt::init(argc, argv); + let mut main = Some(main); + let mut ret = None; + simple::task().run(|| { + ret = Some(run(main.take_unwrap())); + }); + // unsafe is ok b/c we're sure that the runtime is gone + unsafe { rt::cleanup() } + ret.unwrap() +} + +/// Execute the main function in a pool of M:N schedulers. +/// +/// Configures the runtime according to the environment, by default using a task +/// scheduler with the same number of threads as cores. Returns a process exit +/// code. +/// +/// This function will not return until all schedulers in the associated pool +/// have returned. +pub fn run(main: proc()) -> int { + // Create a scheduler pool and spawn the main task into this pool. We will + // get notified over a channel when the main task exits. + let mut pool = SchedPool::new(PoolConfig::new()); + let (port, chan) = Chan::new(); + let mut opts = TaskOpts::new(); + opts.notify_chan = Some(chan); + opts.name = Some(SendStrStatic("<main>")); + pool.spawn(opts, main); + + // Wait for the main task to return, and set the process error code + // appropriately. + if port.recv().is_err() { + os::set_exit_status(rt::DEFAULT_ERROR_CODE); + } + + // Once the main task has exited and we've set our exit code, wait for all + // spawned sub-tasks to finish running. This is done to allow all schedulers + // to remain active while there are still tasks possibly running. + unsafe { + let mut task = Local::borrow(None::<Task>); + task.get().wait_for_other_tasks(); + } + + // Now that we're sure all tasks are dead, shut down the pool of schedulers, + // waiting for them all to return. + pool.shutdown(); + os::get_exit_status() +} + +/// Configuration of how an M:N pool of schedulers is spawned. +pub struct PoolConfig { + /// The number of schedulers (OS threads) to spawn into this M:N pool. + threads: uint, + /// A factory function used to create new event loops. If this is not + /// specified then the default event loop factory is used. + event_loop_factory: Option<fn() -> ~rtio::EventLoop>, +} + +impl PoolConfig { + /// Returns the default configuration, as determined by the environment + /// variables of this process. + pub fn new() -> PoolConfig { + PoolConfig { + threads: rt::default_sched_threads(), + event_loop_factory: None, + } + } +} + +/// A structure representing a handle to a pool of schedulers. This handle is +/// used to keep the pool alive and also reap the status from the pool. +pub struct SchedPool { + priv id: uint, + priv threads: ~[Thread<()>], + priv handles: ~[SchedHandle], + priv stealers: ~[deque::Stealer<~task::GreenTask>], + priv next_friend: uint, + priv stack_pool: StackPool, + priv deque_pool: deque::BufferPool<~task::GreenTask>, + priv sleepers: SleeperList, + priv factory: fn() -> ~rtio::EventLoop, +} + +impl SchedPool { + /// Creates a new pool of M:N schedulers, configured according to the + /// `config` parameter. Tasks are subsequently spawned into the pool with + /// `spawn`. + pub fn new(config: PoolConfig) -> SchedPool { + static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT; + + let PoolConfig { + threads: nscheds, + event_loop_factory: factory + } = config; + let factory = factory.unwrap_or(default_event_loop_factory()); + assert!(nscheds > 0); + + // The pool of schedulers that will be returned from this function + let mut pool = SchedPool { + threads: ~[], + handles: ~[], + stealers: ~[], + id: unsafe { POOL_ID.fetch_add(1, SeqCst) }, + sleepers: SleeperList::new(), + stack_pool: StackPool::new(), + deque_pool: deque::BufferPool::new(), + next_friend: 0, + factory: factory, + }; + + // Create a work queue for each scheduler, n times. Create an extra + // for the main thread if that flag is set. We won't steal from it. + let arr = vec::from_fn(nscheds, |_| pool.deque_pool.deque()); + let (workers, stealers) = vec::unzip(arr.move_iter()); + pool.stealers = stealers; + + // Now that we've got all our work queues, create one scheduler per + // queue, spawn the scheduler into a thread, and be sure to keep a + // handle to the scheduler and the thread to keep them alive.
+ for worker in workers.move_iter() { + rtdebug!("inserting a regular scheduler"); + + let mut sched = ~Scheduler::new(pool.id, + (pool.factory)(), + worker, + pool.stealers.clone(), + pool.sleepers.clone()); + pool.handles.push(sched.make_handle()); + let sched = sched; + pool.threads.push(do Thread::start { + let mut sched = sched; + let task = do GreenTask::new(&mut sched.stack_pool, None) { + rtdebug!("bootstrapping a non-primary scheduler"); + }; + sched.bootstrap(task); + }); + } + + return pool; + } + + pub fn task(&mut self, opts: TaskOpts, f: proc()) -> ~GreenTask { + GreenTask::configure(&mut self.stack_pool, opts, f) + } + + pub fn spawn(&mut self, opts: TaskOpts, f: proc()) { + let task = self.task(opts, f); + + // Figure out someone to send this task to + let idx = self.next_friend; + self.next_friend += 1; + if self.next_friend >= self.handles.len() { + self.next_friend = 0; + } + + // Jettison the task away! + self.handles[idx].send(TaskFromFriend(task)); + } + + /// Spawns a new scheduler into this M:N pool. A handle is returned to the + /// scheduler for use. The scheduler will not exit as long as this handle is + /// active. + /// + /// The scheduler spawned will participate in work stealing with all of the + /// other schedulers currently in the scheduler pool. + pub fn spawn_sched(&mut self) -> SchedHandle { + let (worker, stealer) = self.deque_pool.deque(); + self.stealers.push(stealer.clone()); + + // Tell all existing schedulers about this new scheduler so they can all + // steal work from it + for handle in self.handles.mut_iter() { + handle.send(NewNeighbor(stealer.clone())); + } + + // Create the new scheduler, using the same sleeper list as all the + // other schedulers as well as having a stealer handle to all other + // schedulers. + let mut sched = ~Scheduler::new(self.id, + (self.factory)(), + worker, + self.stealers.clone(), + self.sleepers.clone()); + let ret = sched.make_handle(); + self.handles.push(sched.make_handle()); + let sched = sched; + self.threads.push(do Thread::start { + let mut sched = sched; + let task = do GreenTask::new(&mut sched.stack_pool, None) { + rtdebug!("bootstrapping a non-primary scheduler"); + }; + sched.bootstrap(task); + }); + + return ret; + } + + pub fn shutdown(mut self) { + self.stealers = ~[]; + + for mut handle in util::replace(&mut self.handles, ~[]).move_iter() { + handle.send(Shutdown); + } + for thread in util::replace(&mut self.threads, ~[]).move_iter() { + thread.join(); + } + } +} + +impl Drop for SchedPool { + fn drop(&mut self) { + if self.threads.len() > 0 { + fail!("dropping an M:N scheduler pool that wasn't shut down"); + } + } +} + +fn default_event_loop_factory() -> fn() -> ~rtio::EventLoop { + match crate_map::get_crate_map() { + None => {} + Some(map) => { + match map.event_loop_factory { + None => {} + Some(factory) => return factory + } + } + } + + // If the crate map didn't specify a factory to create an event loop, then + // instead just use a basic event loop missing all I/O services to at least + // get the scheduler running. + return basic::event_loop; +} diff --git a/src/libgreen/macros.rs b/src/libgreen/macros.rs new file mode 100644 index 0000000000000..56dc3204da86e --- /dev/null +++ b/src/libgreen/macros.rs @@ -0,0 +1,129 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option.
This file may not be copied, modified, or distributed +// except according to those terms. + +// XXX: this file probably shouldn't exist + +#[macro_escape]; + +use std::fmt; +use std::libc; + +// Indicates whether we should perform expensive sanity checks, including rtassert! +// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc. +pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert); + +macro_rules! rterrln ( + ($($arg:tt)*) => ( { + format_args!(::macros::dumb_println, $($arg)*) + } ) +) + +// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build. +macro_rules! rtdebug ( + ($($arg:tt)*) => ( { + if cfg!(rtdebug) { + rterrln!($($arg)*) + } + }) +) + +macro_rules! rtassert ( + ( $arg:expr ) => ( { + if ::macros::ENFORCE_SANITY { + if !$arg { + rtabort!(" assertion failed: {}", stringify!($arg)); + } + } + } ) +) + + +macro_rules! rtabort ( + ($($arg:tt)*) => ( { + ::macros::abort(format!($($arg)*)); + } ) +) + +pub fn dumb_println(args: &fmt::Arguments) { + use std::io; + use std::libc; + + struct Stderr; + impl io::Writer for Stderr { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDERR_FILENO, + data.as_ptr() as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut w = Stderr; + fmt::writeln(&mut w as &mut io::Writer, args); +} + +pub fn abort(msg: &str) -> ! { + let msg = if !msg.is_empty() { msg } else { "aborted" }; + let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) ); + let quote = match hash % 10 { + 0 => " +It was from the artists and poets that the pertinent answers came, and I +know that panic would have broken loose had they been able to compare notes. +As it was, lacking their original letters, I half suspected the compiler of +having asked leading questions, or of having edited the correspondence in +corroboration of what he had latently resolved to see.", + 1 => " +There are not many persons who know what wonders are opened to them in the +stories and visions of their youth; for when as children we listen and dream, +we think but half-formed thoughts, and when as men we try to remember, we are +dulled and prosaic with the poison of life. But some of us awake in the night +with strange phantasms of enchanted hills and gardens, of fountains that sing +in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch +down to sleeping cities of bronze and stone, and of shadowy companies of heroes +that ride caparisoned white horses along the edges of thick forests; and then +we know that we have looked back through the ivory gates into that world of +wonder which was ours before we were wise and unhappy.", + 2 => " +Instead of the poems I had hoped for, there came only a shuddering blackness +and ineffable loneliness; and I saw at last a fearful truth which no one had +ever dared to breathe before — the unwhisperable secret of secrets — The fact +that this city of stone and stridor is not a sentient perpetuation of Old New +York as London is of Old London and Paris of Old Paris, but that it is in fact +quite dead, its sprawling body imperfectly embalmed and infested with queer +animate things which have nothing to do with it as it was in life.", + 3 => " +The ocean ate the last of the land and poured into the smoking gulf, thereby +giving up all it had ever conquered. 
From the new-flooded lands it flowed +again, uncovering death and decay; and from its ancient and immemorial bed it +trickled loathsomely, uncovering nighted secrets of the years when Time was +young and the gods unborn. Above the waves rose weedy remembered spires. The +moon laid pale lilies of light on dead London, and Paris stood up from its damp +grave to be sanctified with star-dust. Then rose spires and monoliths that were +weedy but not remembered; terrible spires and monoliths of lands that men never +knew were lands...", + 4 => " +There was a night when winds from unknown spaces whirled us irresistibly into +limitless vacuum beyond all thought and entity. Perceptions of the most +maddeningly untransmissible sort thronged upon us; perceptions of infinity +which at the time convulsed us with joy, yet which are now partly lost to my +memory and partly incapable of presentation to others.", + _ => "You've met with a terrible fate, haven't you?" + }; + rterrln!("{}", ""); + rterrln!("{}", quote); + rterrln!("{}", ""); + rterrln!("fatal runtime error: {}", msg); + + abort(); + + fn abort() -> ! { + unsafe { libc::abort() } + } +} diff --git a/src/libstd/rt/sched.rs b/src/libgreen/sched.rs similarity index 61% rename from src/libstd/rt/sched.rs rename to src/libgreen/sched.rs index 15aa1602cd0e0..ef62f654ddf48 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libgreen/sched.rs @@ -8,27 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::{Option, Some, None}; -use cast::{transmute, transmute_mut_region, transmute_mut_unsafe}; -use clone::Clone; -use unstable::raw; -use super::sleeper_list::SleeperList; -use super::stack::{StackPool}; -use super::rtio::EventLoop; -use super::context::Context; -use super::task::{Task, AnySched, Sched}; -use rt::kill::BlockedTask; -use rt::deque; -use rt::local_ptr; -use rt::local::Local; -use rt::rtio::{RemoteCallback, PausableIdleCallback, Callback}; -use borrow::{to_uint}; -use rand::{XorShiftRng, Rng, Rand}; -use iter::range; -use unstable::mutex::Mutex; -use vec::{OwnedVector}; - -use mpsc = super::mpsc_queue; +use std::cast; +use std::rand::{XorShiftRng, Rng, Rand}; +use std::rt::local::Local; +use std::rt::rtio::{RemoteCallback, PausableIdleCallback, Callback, EventLoop}; +use std::rt::task::BlockedTask; +use std::rt::task::Task; +use std::sync::deque; +use std::unstable::mutex::Mutex; +use std::unstable::raw; +use mpsc = std::sync::mpsc_queue; + +use context::Context; +use coroutine::Coroutine; +use sleeper_list::SleeperList; +use stack::StackPool; +use task::{TypeSched, GreenTask, HomeSched, AnySched}; /// A scheduler is responsible for coordinating the execution of Tasks /// on a single thread. The scheduler runs inside a slightly modified @@ -39,11 +34,15 @@ use mpsc = super::mpsc_queue; /// XXX: This creates too many callbacks to run_sched_once, resulting /// in too much allocation and too many events. pub struct Scheduler { + /// ID number of the pool that this scheduler is a member of. When + /// reawakening green tasks, this is used to ensure that tasks aren't + /// reawoken on the wrong pool of schedulers. + pool_id: uint, /// There are N work queues, one per scheduler. - work_queue: deque::Worker<~Task>, + work_queue: deque::Worker<~GreenTask>, /// Work queues for the other schedulers. These are created by /// cloning the core work queues. 
- work_queues: ~[deque::Stealer<~Task>], + work_queues: ~[deque::Stealer<~GreenTask>], /// The queue of incoming messages from other schedulers. /// These are enqueued by SchedHandles after which a remote callback /// is triggered to handle the message. @@ -66,15 +65,15 @@ pub struct Scheduler { stack_pool: StackPool, /// The scheduler runs on a special task. When it is not running /// it is stored here instead of the work queue. - sched_task: Option<~Task>, + sched_task: Option<~GreenTask>, /// An action performed after a context switch on behalf of the /// code running before the context switch cleanup_job: Option, - /// Should this scheduler run any task, or only pinned tasks? - run_anything: bool, /// If the scheduler shouldn't run some tasks, a friend to send /// them to. friend_handle: Option, + /// Should this scheduler run any task, or only pinned tasks? + run_anything: bool, /// A fast XorShift rng for scheduler use rng: XorShiftRng, /// A togglable idle callback @@ -117,21 +116,22 @@ impl Scheduler { // * Initialization Functions - pub fn new(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], + pub fn new(pool_id: uint, + event_loop: ~EventLoop, + work_queue: deque::Worker<~GreenTask>, + work_queues: ~[deque::Stealer<~GreenTask>], sleeper_list: SleeperList) -> Scheduler { - Scheduler::new_special(event_loop, work_queue, - work_queues, + Scheduler::new_special(pool_id, event_loop, work_queue, work_queues, sleeper_list, true, None) } - pub fn new_special(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], + pub fn new_special(pool_id: uint, + event_loop: ~EventLoop, + work_queue: deque::Worker<~GreenTask>, + work_queues: ~[deque::Stealer<~GreenTask>], sleeper_list: SleeperList, run_anything: bool, friend: Option) @@ -139,6 +139,7 @@ impl Scheduler { let (consumer, producer) = mpsc::queue(()); let mut sched = Scheduler { + pool_id: pool_id, sleeper_list: sleeper_list, message_queue: consumer, message_producer: producer, @@ -170,66 +171,58 @@ impl Scheduler { // Take a main task to run, and a scheduler to run it in. Create a // scheduler task and bootstrap into it. - pub fn bootstrap(mut ~self, task: ~Task) { + pub fn bootstrap(mut ~self, task: ~GreenTask) { // Build an Idle callback. let cb = ~SchedRunner as ~Callback; self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb)); - // Initialize the TLS key. - local_ptr::init(); - // Create a task for the scheduler with an empty context. - let sched_task = ~Task::new_sched_task(); - - // Now that we have an empty task struct for the scheduler - // task, put it in TLS. - Local::put(sched_task); + let sched_task = GreenTask::new_typed(Some(Coroutine::empty()), + TypeSched); // Before starting our first task, make sure the idle callback // is active. As we do not start in the sleep state this is // important. self.idle_callback.get_mut_ref().resume(); - // Now, as far as all the scheduler state is concerned, we are - // inside the "scheduler" context. So we can act like the - // scheduler and resume the provided task. - self.resume_task_immediately(task); + // Now, as far as all the scheduler state is concerned, we are inside + // the "scheduler" context. So we can act like the scheduler and resume + // the provided task. Let it think that the currently running task is + // actually the sched_task so it knows where to squirrel it away. 
+ let mut sched_task = self.resume_task_immediately(sched_task, task); // Now we are back in the scheduler context, having // successfully run the input task. Start by running the // scheduler. Grab it out of TLS - performing the scheduler // action will have given it away. - let sched: ~Scheduler = Local::take(); - + let sched = sched_task.sched.take_unwrap(); rtdebug!("starting scheduler {}", sched.sched_id()); - sched.run(); + let mut sched_task = sched.run(sched_task); // Close the idle callback. - let mut sched: ~Scheduler = Local::take(); + let mut sched = sched_task.sched.take_unwrap(); sched.idle_callback.take(); // Make one go through the loop to run the close callback. - sched.run(); + let mut stask = sched.run(sched_task); // Now that we are done with the scheduler, clean up the // scheduler task. Do so by removing it from TLS and manually // cleaning up the memory it uses. As we didn't actually call // task.run() on the scheduler task we never get through all // the cleanup code it runs. - let mut stask: ~Task = Local::take(); - rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id()); // Should not have any messages let message = stask.sched.get_mut_ref().message_queue.pop(); rtassert!(match message { mpsc::Empty => true, _ => false }); - stask.destroyed = true; + stask.task.get_mut_ref().destroyed = true; } // This does not return a scheduler, as the scheduler is placed // inside the task. - pub fn run(mut ~self) { + pub fn run(mut ~self, stask: ~GreenTask) -> ~GreenTask { // This is unsafe because we need to place the scheduler, with // the event_loop inside, inside our task. But we still need a @@ -237,16 +230,24 @@ impl Scheduler { // command. unsafe { let event_loop: *mut ~EventLoop = &mut self.event_loop; - - { - // Our scheduler must be in the task before the event loop - // is started. - let mut stask = Local::borrow(None::); - stask.get().sched = Some(self); - } - + // Our scheduler must be in the task before the event loop + // is started. + stask.put_with_sched(self); (*event_loop).run(); } + + // This is a serious code smell, but this function could be done away + // with if necessary. The ownership of `stask` was transferred into + // local storage just before the event loop ran, so it is possible to + // transmute `stask` as a uint across the running of the event loop to + // re-acquire ownership here. + // + // This would involve removing the Task from TLS, removing the runtime, + // forgetting the runtime, and then putting the task into `stask`. For + // now, because we have `GreenTask::convert`, I chose to take this + // method for cleanliness. This function is *not* a fundamental reason + // why this function should exist. + GreenTask::convert(Local::take()) } // * Execution Functions - Core Loop Logic @@ -257,38 +258,37 @@ impl Scheduler { // you reach the end and sleep. In the case that a scheduler // action is performed the loop is evented such that this function // is called again. - fn run_sched_once() { - - // When we reach the scheduler context via the event loop we - // already have a scheduler stored in our local task, so we - // start off by taking it. This is the only path through the - // scheduler where we get the scheduler this way. - let mut sched: ~Scheduler = Local::take(); + fn run_sched_once(mut ~self, stask: ~GreenTask) { + // Make sure that we're not lying in that the `stask` argument is indeed + // the scheduler task for this scheduler. 
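
The restructured `run_sched_once` below now threads the scheduler task through explicitly, but its control flow is unchanged: check messages first, then steal work, then check messages once more before sleeping. Here is a condensed, runnable model of that single pass; every type and method in it is an illustrative stand-in, not the patch's API.

```rust
use std::collections::VecDeque;

struct Sched {
    messages: VecDeque<&'static str>,
    tasks: VecDeque<&'static str>,
}

enum Step { HandledMessage, RanTask, WentToSleep }

impl Sched {
    fn try_message(&mut self) -> Option<&'static str> { self.messages.pop_front() }
    fn find_work(&mut self) -> Option<&'static str> { self.tasks.pop_front() }
    fn handle(&mut self, m: &str) { println!("message: {}", m); }
    fn run(&mut self, t: &str) { println!("task: {}", t); }
    fn sleep(&self) { println!("no work: would push our handle on the sleeper list"); }

    // One pass of the loop described above: messages are higher priority
    // than tasks, and messages get one last look before going to sleep so
    // a wakeup sent during the steal attempt is not lost.
    fn run_once(&mut self) -> Step {
        if let Some(m) = self.try_message() {
            self.handle(m);
            return Step::HandledMessage;
        }
        if let Some(t) = self.find_work() {
            self.run(t);
            return Step::RanTask;
        }
        if let Some(m) = self.try_message() {
            self.handle(m);
            return Step::HandledMessage;
        }
        self.sleep();
        Step::WentToSleep
    }
}

fn main() {
    let mut s = Sched {
        messages: VecDeque::from(vec!["wake"]),
        tasks: VecDeque::from(vec!["task-a"]),
    };
    // Drives: one message, one task, then sleep.
    while !matches!(s.run_once(), Step::WentToSleep) {}
}
```
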
+ assert!(self.sched_task.is_none()); // Assume that we need to continue idling unless we reach the // end of this function without performing an action. - sched.idle_callback.get_mut_ref().resume(); + self.idle_callback.get_mut_ref().resume(); // First we check for scheduler messages, these are higher // priority than regular tasks. - let sched = match sched.interpret_message_queue(DontTryTooHard) { - Some(sched) => sched, - None => return - }; + let (sched, stask) = + match self.interpret_message_queue(stask, DontTryTooHard) { + Some(pair) => pair, + None => return + }; // This helper will use a randomized work-stealing algorithm // to find work. - let sched = match sched.do_work() { - Some(sched) => sched, + let (sched, stask) = match sched.do_work(stask) { + Some(pair) => pair, None => return }; // Now, before sleeping we need to find out if there really // were any messages. Give it your best! - let mut sched = match sched.interpret_message_queue(GiveItYourBest) { - Some(sched) => sched, - None => return - }; + let (mut sched, stask) = + match sched.interpret_message_queue(stask, GiveItYourBest) { + Some(pair) => pair, + None => return + }; // If we got here then there was no work to do. // Generate a SchedHandle and push it to the sleeper list so @@ -309,14 +309,17 @@ impl Scheduler { // Finished a cycle without using the Scheduler. Place it back // in TLS. - Local::put(sched); + stask.put_with_sched(sched); } // This function returns None if the scheduler is "used", or it // returns the still-available scheduler. At this point all // message-handling will count as a turn of work, and as a result // return None. - fn interpret_message_queue(mut ~self, effort: EffortLevel) -> Option<~Scheduler> { + fn interpret_message_queue(mut ~self, stask: ~GreenTask, + effort: EffortLevel) + -> Option<(~Scheduler, ~GreenTask)> + { let msg = if effort == DontTryTooHard { self.message_queue.casual_pop() @@ -345,24 +348,25 @@ impl Scheduler { match msg { Some(PinnedTask(task)) => { let mut task = task; - task.give_home(Sched(self.make_handle())); - self.resume_task_immediately(task); + task.give_home(HomeSched(self.make_handle())); + self.resume_task_immediately(stask, task).put(); return None; } Some(TaskFromFriend(task)) => { rtdebug!("got a task from a friend. lovely!"); - self.process_task(task, Scheduler::resume_task_immediately_cl); + self.process_task(stask, task, + Scheduler::resume_task_immediately_cl); return None; } Some(RunOnce(task)) => { // bypass the process_task logic to force running this task once // on this home scheduler. This is often used for I/O (homing). - Scheduler::resume_task_immediately_cl(self, task); + self.resume_task_immediately(stask, task).put(); return None; } Some(Wake) => { self.sleepy = false; - Local::put(self); + stask.put_with_sched(self); return None; } Some(Shutdown) => { @@ -385,26 +389,31 @@ impl Scheduler { // event loop references we will shut down. self.no_sleep = true; self.sleepy = false; - Local::put(self); + stask.put_with_sched(self); return None; } + Some(NewNeighbor(neighbor)) => { + self.work_queues.push(neighbor); + return Some((self, stask)); + } None => { - return Some(self); + return Some((self, stask)); } } } - fn do_work(mut ~self) -> Option<~Scheduler> { + fn do_work(mut ~self, stask: ~GreenTask) -> Option<(~Scheduler, ~GreenTask)> { rtdebug!("scheduler calling do work"); match self.find_work() { Some(task) => { - rtdebug!("found some work! 
processing the task"); - self.process_task(task, Scheduler::resume_task_immediately_cl); + rtdebug!("found some work! running the task"); + self.process_task(stask, task, + Scheduler::resume_task_immediately_cl); return None; } None => { rtdebug!("no work was found, returning the scheduler struct"); - return Some(self); + return Some((self, stask)); } } } @@ -418,7 +427,7 @@ impl Scheduler { // First step in the process is to find a task. This function does // that by first checking the local queue, and if there is no work // there, trying to steal from the remote work queues. - fn find_work(&mut self) -> Option<~Task> { + fn find_work(&mut self) -> Option<~GreenTask> { rtdebug!("scheduler looking for work"); if !self.steal_for_yield { match self.work_queue.pop() { @@ -456,7 +465,7 @@ impl Scheduler { // Try stealing from all queues the scheduler knows about. This // naive implementation can steal from our own queue or from other // special schedulers. - fn try_steals(&mut self) -> Option<~Task> { + fn try_steals(&mut self) -> Option<~GreenTask> { let work_queues = &mut self.work_queues; let len = work_queues.len(); let start_index = self.rng.gen_range(0, len); @@ -476,53 +485,48 @@ impl Scheduler { // * Task Routing Functions - Make sure tasks send up in the right // place. - fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) { + fn process_task(mut ~self, cur: ~GreenTask, + mut next: ~GreenTask, schedule_fn: SchedulingFn) { rtdebug!("processing a task"); - let home = task.take_unwrap_home(); - match home { - Sched(home_handle) => { + match next.take_unwrap_home() { + HomeSched(home_handle) => { if home_handle.sched_id != self.sched_id() { rtdebug!("sending task home"); - task.give_home(Sched(home_handle)); - Scheduler::send_task_home(task); - Local::put(self); + next.give_home(HomeSched(home_handle)); + Scheduler::send_task_home(next); + cur.put_with_sched(self); } else { rtdebug!("running task here"); - task.give_home(Sched(home_handle)); - schedule_fn(self, task); + next.give_home(HomeSched(home_handle)); + schedule_fn(self, cur, next); } } AnySched if self.run_anything => { rtdebug!("running anysched task here"); - task.give_home(AnySched); - schedule_fn(self, task); + next.give_home(AnySched); + schedule_fn(self, cur, next); } AnySched => { rtdebug!("sending task to friend"); - task.give_home(AnySched); - self.send_to_friend(task); - Local::put(self); + next.give_home(AnySched); + self.send_to_friend(next); + cur.put_with_sched(self); } } } - fn send_task_home(task: ~Task) { + fn send_task_home(task: ~GreenTask) { let mut task = task; - let mut home = task.take_unwrap_home(); - match home { - Sched(ref mut home_handle) => { - home_handle.send(PinnedTask(task)); - } - AnySched => { - rtabort!("error: cannot send anysched task home"); - } + match task.take_unwrap_home() { + HomeSched(mut home_handle) => home_handle.send(PinnedTask(task)), + AnySched => rtabort!("error: cannot send anysched task home"), } } /// Take a non-homed task we aren't allowed to run here and send /// it to the designated friend scheduler to execute. - fn send_to_friend(&mut self, task: ~Task) { + fn send_to_friend(&mut self, task: ~GreenTask) { rtdebug!("sending a task to friend"); match self.friend_handle { Some(ref mut handle) => { @@ -539,9 +543,10 @@ impl Scheduler { /// Pushes the task onto the work stealing queue and tells the /// event loop to run it later. Always use this instead of pushing /// to the work queue directly. 
- pub fn enqueue_task(&mut self, task: ~Task) { + pub fn enqueue_task(&mut self, task: ~GreenTask) { // We push the task onto our local queue clone. + assert!(!task.is_sched()); self.work_queue.push(task); self.idle_callback.get_mut_ref().resume(); @@ -557,47 +562,31 @@ impl Scheduler { }; } - /// As enqueue_task, but with the possibility for the blocked task to - /// already have been killed. - pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) { - blocked_task.wake().map(|task| self.enqueue_task(task)); - } - // * Core Context Switching Functions // The primary function for changing contexts. In the current // design the scheduler is just a slightly modified GreenTask, so - // all context swaps are from Task to Task. The only difference + // all context swaps are from GreenTask to GreenTask. The only difference // between the various cases is where the inputs come from, and // what is done with the resulting task. That is specified by the // cleanup function f, which takes the scheduler and the // old task as inputs. pub fn change_task_context(mut ~self, - next_task: ~Task, - f: |&mut Scheduler, ~Task|) { - // The current task is grabbed from TLS, not taken as an input. - // Doing an unsafe_take to avoid writing back a null pointer - - // We're going to call `put` later to do that. - let current_task: ~Task = unsafe { Local::unsafe_take() }; - - // Check that the task is not in an atomically() section (e.g., - // holding a pthread mutex, which could deadlock the scheduler). - current_task.death.assert_may_sleep(); - - // These transmutes do something fishy with a closure. - let f_fake_region = unsafe { - transmute::<|&mut Scheduler, ~Task|, - |&mut Scheduler, ~Task|>(f) + current_task: ~GreenTask, + mut next_task: ~GreenTask, + f: |&mut Scheduler, ~GreenTask|) -> ~GreenTask { + let f_opaque = ClosureConverter::from_fn(f); + + let current_task_dupe = unsafe { + *cast::transmute::<&~GreenTask, &uint>(¤t_task) }; - let f_opaque = ClosureConverter::from_fn(f_fake_region); // The current task is placed inside an enum with the cleanup // function. This enum is then placed inside the scheduler. self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque)); // The scheduler is then placed inside the next task. - let mut next_task = next_task; next_task.sched = Some(self); // However we still need an internal mutable pointer to the @@ -607,12 +596,12 @@ impl Scheduler { unsafe { let sched: &mut Scheduler = - transmute_mut_region(*next_task.sched.get_mut_ref()); + cast::transmute_mut_region(*next_task.sched.get_mut_ref()); - let current_task: &mut Task = match sched.cleanup_job { + let current_task: &mut GreenTask = match sched.cleanup_job { Some(CleanupJob { task: ref task, .. }) => { - let task_ptr: *~Task = task; - transmute_mut_region(*transmute_mut_unsafe(task_ptr)) + let task_ptr: *~GreenTask = task; + cast::transmute_mut_region(*cast::transmute_mut_unsafe(task_ptr)) } None => { rtabort!("no cleanup job"); @@ -626,7 +615,7 @@ impl Scheduler { // works because due to transmute the borrow checker // believes that we have no internal pointers to // next_task. - Local::put(next_task); + cast::forget(next_task); // The raw context swap operation. The next action taken // will be running the cleanup job from the context of the @@ -637,16 +626,19 @@ impl Scheduler { // When the context swaps back to this task we immediately // run the cleanup job, as expected by the previously called // swap_contexts function. 
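
The `current_task_dupe` trick in the hunk below (transmuting the owned `~GreenTask` pointer to a `uint` before the swap, then rebuilding the owned pointer afterwards) is the same round trip that modern Rust spells as `Box::into_raw`/`Box::from_raw`. A minimal demonstration of just that ownership hand-off, with none of the scheduler machinery:

```rust
fn main() {
    let task = Box::new(String::from("green task"));

    // Give ownership away as a raw pointer, as change_task_context does
    // with its transmute-to-uint before the context swap.
    let raw: *mut String = Box::into_raw(task);

    // ... the context switch happens here; the pointer value survives it ...

    // Reclaim ownership on the other side of the swap. This is only sound
    // because exactly one side ever reconstructs the box.
    let task: Box<String> = unsafe { Box::from_raw(raw) };
    assert_eq!(&*task, "green task");
}
```
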
+ let mut current_task: ~GreenTask = unsafe { + cast::transmute(current_task_dupe) + }; + current_task.sched.get_mut_ref().run_cleanup_job(); + + // See the comments in switch_running_tasks_and_then for why a lock + // is acquired here. This is the resumption points and the "bounce" + // that it is referring to. unsafe { - let task: *mut Task = Local::unsafe_borrow(); - (*task).sched.get_mut_ref().run_cleanup_job(); - - // See the comments in switch_running_tasks_and_then for why a lock - // is acquired here. This is the resumption points and the "bounce" - // that it is referring to. - (*task).nasty_deschedule_lock.lock(); - (*task).nasty_deschedule_lock.unlock(); + current_task.nasty_deschedule_lock.lock(); + current_task.nasty_deschedule_lock.unlock(); } + return current_task; } // Returns a mutable reference to both contexts involved in this @@ -654,37 +646,33 @@ impl Scheduler { // references to keep even when we don't own the tasks. It looks // kinda safe because we are doing transmutes before passing in // the arguments. - pub fn get_contexts<'a>(current_task: &mut Task, next_task: &mut Task) -> + pub fn get_contexts<'a>(current_task: &mut GreenTask, next_task: &mut GreenTask) -> (&'a mut Context, &'a mut Context) { let current_task_context = &mut current_task.coroutine.get_mut_ref().saved_context; let next_task_context = &mut next_task.coroutine.get_mut_ref().saved_context; unsafe { - (transmute_mut_region(current_task_context), - transmute_mut_region(next_task_context)) + (cast::transmute_mut_region(current_task_context), + cast::transmute_mut_region(next_task_context)) } } // * Context Swapping Helpers - Here be ugliness! - pub fn resume_task_immediately(~self, task: ~Task) { - self.change_task_context(task, |sched, stask| { + pub fn resume_task_immediately(~self, cur: ~GreenTask, + next: ~GreenTask) -> ~GreenTask { + assert!(cur.is_sched()); + self.change_task_context(cur, next, |sched, stask| { + assert!(sched.sched_task.is_none()); sched.sched_task = Some(stask); }) } fn resume_task_immediately_cl(sched: ~Scheduler, - task: ~Task) { - sched.resume_task_immediately(task) - } - - - pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) { - match blocked_task.wake() { - Some(task) => { self.resume_task_immediately(task); } - None => Local::put(self) - }; + cur: ~GreenTask, + next: ~GreenTask) { + sched.resume_task_immediately(cur, next).put() } /// Block a running task, context switch to the scheduler, then pass the @@ -709,15 +697,18 @@ impl Scheduler { /// guaranteed that this function will not return before the given closure /// has returned. pub fn deschedule_running_task_and_then(mut ~self, + cur: ~GreenTask, f: |&mut Scheduler, BlockedTask|) { // Trickier - we need to get the scheduler task out of self // and use it as the destination. let stask = self.sched_task.take_unwrap(); // Otherwise this is the same as below. - self.switch_running_tasks_and_then(stask, f); + self.switch_running_tasks_and_then(cur, stask, f) } - pub fn switch_running_tasks_and_then(~self, next_task: ~Task, + pub fn switch_running_tasks_and_then(~self, + cur: ~GreenTask, + next: ~GreenTask, f: |&mut Scheduler, BlockedTask|) { // And here comes one of the sad moments in which a lock is used in a // core portion of the rust runtime. As always, this is highly @@ -733,80 +724,99 @@ impl Scheduler { // task-local lock around this block. 
The resumption of the task in // context switching will bounce on the lock, thereby waiting for this // block to finish, eliminating the race mentioned above. + // fail!("should never return!"); // // To actually maintain a handle to the lock, we use an unsafe pointer // to it, but we're guaranteed that the task won't exit until we've // unlocked the lock so there's no worry of this memory going away. - self.change_task_context(next_task, |sched, mut task| { + let cur = self.change_task_context(cur, next, |sched, mut task| { let lock: *mut Mutex = &mut task.nasty_deschedule_lock; unsafe { (*lock).lock() } - f(sched, BlockedTask::block(task)); + f(sched, BlockedTask::block(task.swap())); unsafe { (*lock).unlock() } - }) + }); + cur.put(); } - fn switch_task(sched: ~Scheduler, task: ~Task) { - sched.switch_running_tasks_and_then(task, |sched, last_task| { - sched.enqueue_blocked_task(last_task); - }); + fn switch_task(sched: ~Scheduler, cur: ~GreenTask, next: ~GreenTask) { + sched.change_task_context(cur, next, |sched, last_task| { + if last_task.is_sched() { + assert!(sched.sched_task.is_none()); + sched.sched_task = Some(last_task); + } else { + sched.enqueue_task(last_task); + } + }).put() } // * Task Context Helpers /// Called by a running task to end execution, after which it will /// be recycled by the scheduler for reuse in a new task. - pub fn terminate_current_task(mut ~self) { + pub fn terminate_current_task(mut ~self, cur: ~GreenTask) { // Similar to deschedule running task and then, but cannot go through // the task-blocking path. The task is already dying. let stask = self.sched_task.take_unwrap(); - self.change_task_context(stask, |sched, mut dead_task| { + let _cur = self.change_task_context(cur, stask, |sched, mut dead_task| { let coroutine = dead_task.coroutine.take_unwrap(); coroutine.recycle(&mut sched.stack_pool); - }) + }); + fail!("should never return!"); } - pub fn run_task(task: ~Task) { - let sched: ~Scheduler = Local::take(); - sched.process_task(task, Scheduler::switch_task); + pub fn run_task(~self, cur: ~GreenTask, next: ~GreenTask) { + self.process_task(cur, next, Scheduler::switch_task); } - pub fn run_task_later(next_task: ~Task) { - let mut sched = Local::borrow(None::); - sched.get().enqueue_task(next_task); + pub fn run_task_later(mut cur: ~GreenTask, next: ~GreenTask) { + let mut sched = cur.sched.take_unwrap(); + sched.enqueue_task(next); + cur.put_with_sched(sched); } /// Yield control to the scheduler, executing another task. This is guaranteed /// to introduce some amount of randomness to the scheduler. Currently the /// randomness is a result of performing a round of work stealing (which /// may end up stealing from the current scheduler). - pub fn yield_now(mut ~self) { - self.yield_check_count = reset_yield_check(&mut self.rng); - // Tell the scheduler to start stealing on the next iteration - self.steal_for_yield = true; - self.deschedule_running_task_and_then(|sched, task| { - sched.enqueue_blocked_task(task); - }) + pub fn yield_now(mut ~self, cur: ~GreenTask) { + // Async handles trigger the scheduler by calling yield_now on the local + // task, which eventually gets us to here. See comments in SchedRunner + // for more info on this. 
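
The `maybe_yield` implementation just below only performs a real yield every Nth call, tracked by `yield_check_count` (in the real code the reset value comes from the scheduler's RNG via `reset_yield_check`). The budget pattern in isolation, with a hypothetical fixed reset value:

```rust
struct YieldBudget {
    countdown: u32,
    reset: u32,
}

impl YieldBudget {
    fn new(reset: u32) -> YieldBudget {
        YieldBudget { countdown: reset, reset }
    }

    // Returns true when the caller should actually yield; the common path
    // is just a decrement, like maybe_yield's yield_check_count.
    fn maybe_yield(&mut self) -> bool {
        assert!(self.countdown > 0);
        self.countdown -= 1;
        if self.countdown == 0 {
            self.countdown = self.reset;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut budget = YieldBudget::new(3);
    let yields: Vec<bool> = (0..6).map(|_| budget.maybe_yield()).collect();
    assert_eq!(yields, vec![false, false, true, false, false, true]);
}
```
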
+ if cur.is_sched() { + assert!(self.sched_task.is_none()); + self.run_sched_once(cur); + } else { + self.yield_check_count = reset_yield_check(&mut self.rng); + // Tell the scheduler to start stealing on the next iteration + self.steal_for_yield = true; + let stask = self.sched_task.take_unwrap(); + let cur = self.change_task_context(cur, stask, |sched, task| { + sched.enqueue_task(task); + }); + cur.put() + } } - pub fn maybe_yield(mut ~self) { - // The number of times to do the yield check before yielding, chosen arbitrarily. + pub fn maybe_yield(mut ~self, cur: ~GreenTask) { + // The number of times to do the yield check before yielding, chosen + // arbitrarily. rtassert!(self.yield_check_count > 0); self.yield_check_count -= 1; if self.yield_check_count == 0 { - self.yield_now(); + self.yield_now(cur); } else { - Local::put(self); + cur.put_with_sched(self); } } // * Utility Functions - pub fn sched_id(&self) -> uint { to_uint(self) } + pub fn sched_id(&self) -> uint { unsafe { cast::transmute(self) } } pub fn run_cleanup_job(&mut self) { let cleanup_job = self.cleanup_job.take_unwrap(); - cleanup_job.run(self); + cleanup_job.run(self) } pub fn make_handle(&mut self) -> SchedHandle { @@ -816,20 +826,21 @@ impl Scheduler { remote: remote, queue: self.message_producer.clone(), sched_id: self.sched_id() - }; + } } } // Supporting types -type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task); +type SchedulingFn = extern "Rust" fn (~Scheduler, ~GreenTask, ~GreenTask); pub enum SchedMessage { Wake, Shutdown, - PinnedTask(~Task), - TaskFromFriend(~Task), - RunOnce(~Task), + NewNeighbor(deque::Stealer<~GreenTask>), + PinnedTask(~GreenTask), + TaskFromFriend(~GreenTask), + RunOnce(~GreenTask), } pub struct SchedHandle { @@ -849,17 +860,28 @@ struct SchedRunner; impl Callback for SchedRunner { fn call(&mut self) { - Scheduler::run_sched_once(); + // In theory, this function needs to invoke the `run_sched_once` + // function on the scheduler. Sadly, we have no context here, except for + // knowledge of the local `Task`. In order to avoid a call to + // `GreenTask::convert`, we just call `yield_now` and the scheduler will + // detect when a sched task performs a yield vs a green task performing + // a yield (and act accordingly). + // + // This function could be converted to `GreenTask::convert` if + // absolutely necessary, but for cleanliness it is much better to not + // use the conversion function. 
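
What `SchedRunner::call` relies on here is double dispatch through the boxed runtime: one `yield_now` entry point, with the scheduler distinguishing a sched-task yield from a green-task yield. A stripped-down model of that dispatch in modern Rust, where the trait and both impls are illustrative only:

```rust
// A boxed runtime consumed by each call, as the ~Runtime object is.
trait Runtime {
    fn yield_now(self: Box<Self>);
}

struct GreenLike;
struct SchedLike;

impl Runtime for GreenLike {
    fn yield_now(self: Box<Self>) {
        println!("green task yield: reschedule me and run someone else");
    }
}

impl Runtime for SchedLike {
    fn yield_now(self: Box<Self>) {
        // This is the is_sched() branch above: a sched task "yielding"
        // really means "run the scheduler loop once".
        println!("sched task yield: run_sched_once");
    }
}

fn main() {
    // The idle callback has no context beyond the local task, so it just
    // asks whatever runtime is installed to yield.
    let tasks: Vec<Box<dyn Runtime>> = vec![Box::new(GreenLike), Box::new(SchedLike)];
    for t in tasks {
        t.yield_now();
    }
}
```
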
+ let task: ~Task = Local::take(); + task.yield_now(); } } struct CleanupJob { - task: ~Task, + task: ~GreenTask, f: UnsafeTaskReceiver } impl CleanupJob { - pub fn new(task: ~Task, f: UnsafeTaskReceiver) -> CleanupJob { + pub fn new(task: ~GreenTask, f: UnsafeTaskReceiver) -> CleanupJob { CleanupJob { task: task, f: f @@ -876,14 +898,16 @@ impl CleanupJob { // complaining type UnsafeTaskReceiver = raw::Closure; trait ClosureConverter { - fn from_fn(|&mut Scheduler, ~Task|) -> Self; - fn to_fn(self) -> |&mut Scheduler, ~Task|; + fn from_fn(|&mut Scheduler, ~GreenTask|) -> Self; + fn to_fn(self) -> |&mut Scheduler, ~GreenTask|; } impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: |&mut Scheduler, ~Task|) -> UnsafeTaskReceiver { - unsafe { transmute(f) } + fn from_fn(f: |&mut Scheduler, ~GreenTask|) -> UnsafeTaskReceiver { + unsafe { cast::transmute(f) } + } + fn to_fn(self) -> |&mut Scheduler, ~GreenTask| { + unsafe { cast::transmute(self) } + } - fn to_fn(self) -> |&mut Scheduler, ~Task| { unsafe { transmute(self) } } } // On unix, we read randomness straight from /dev/urandom, but the @@ -897,12 +921,9 @@ fn new_sched_rng() -> XorShiftRng { } #[cfg(unix)] fn new_sched_rng() -> XorShiftRng { - use libc; - use mem; - use c_str::ToCStr; - use vec::MutableVector; - use iter::Iterator; - use rand::SeedableRng; + use std::libc; + use std::mem; + use std::rand::SeedableRng; let fd = "/dev/urandom".with_c_str(|name| { unsafe { libc::open(name, libc::O_RDONLY, 0) } @@ -933,24 +954,47 @@ fn new_sched_rng() -> XorShiftRng { #[cfg(test)] mod test { - use prelude::*; - - use borrow::to_uint; - use rt::deque::BufferPool; - use rt::basic; - use rt::sched::{Scheduler}; - use rt::task::{Task, Sched}; - use rt::test::*; - use rt::thread::Thread; - use rt::util; - use task::TaskResult; - use unstable::run_in_bare_thread; + use std::task::TaskOpts; + use std::rt::Runtime; + use std::rt::task::Task; + use std::rt::local::Local; + + use basic; + use sched::{TaskFromFriend, PinnedTask}; + use task::{GreenTask, HomeSched}; + use PoolConfig; + use SchedPool; + + fn pool() -> SchedPool { + SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: Some(basic::event_loop), + }) + } + + fn run(f: proc()) { + let mut pool = pool(); + pool.spawn(TaskOpts::new(), f); + pool.shutdown(); + } + + fn sched_id() -> uint { + let mut task = Local::borrow(None::<Task>); + match task.get().maybe_take_runtime::<GreenTask>() { + Some(green) => { + let ret = green.sched.get_ref().sched_id(); + task.get().put_runtime(green as ~Runtime); + return ret; + } + None => fail!() + } + } #[test] fn trivial_run_in_newsched_task_test() { let mut task_ran = false; let task_ran_ptr: *mut bool = &mut task_ran; - do run_in_newsched_task || { + do run { unsafe { *task_ran_ptr = true }; rtdebug!("executed from the new scheduler") } @@ -962,9 +1006,11 @@ mod test { let total = 10; let mut task_run_count = 0; let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { + // with only one thread this is safe to run in without worries of + // contention.
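
`new_sched_rng` above seeds a XorShift generator from /dev/urandom so that picking steal victims stays cheap after the one-time seeding. For reference, the whole algorithm behind `XorShiftRng` is Marsaglia's xorshift128, a few shifts and xors per number; the fixed seed constants below are only to keep the sketch self-contained, where the real code reads them from the OS.

```rust
struct XorShift128 {
    x: u32,
    y: u32,
    z: u32,
    w: u32,
}

impl XorShift128 {
    // The seed must not be all zeros or the generator is stuck at zero.
    fn new(seed: [u32; 4]) -> XorShift128 {
        assert!(seed.iter().any(|&s| s != 0));
        XorShift128 { x: seed[0], y: seed[1], z: seed[2], w: seed[3] }
    }

    // One step of Marsaglia's xorshift128.
    fn next_u32(&mut self) -> u32 {
        let t = self.x ^ (self.x << 11);
        self.x = self.y;
        self.y = self.z;
        self.z = self.w;
        self.w = self.w ^ (self.w >> 19) ^ (t ^ (t >> 8));
        self.w
    }

    // The equivalent of rng.gen_range(0, len) in try_steals.
    fn gen_range(&mut self, len: u32) -> u32 {
        self.next_u32() % len
    }
}

fn main() {
    let mut rng = XorShift128::new([0x193a6754, 0xa8a7d469, 0x97830e05, 0x113ba7bb]);
    let victim = rng.gen_range(8);
    assert!(victim < 8);
}
```
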
+ do run { for _ in range(0u, total) { - do spawntask || { + do spawn || { unsafe { *task_run_count_ptr = *task_run_count_ptr + 1}; } } @@ -976,12 +1022,12 @@ mod test { fn multiple_task_nested_test() { let mut task_run_count = 0; let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { - do spawntask || { + do run { + do spawn { unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { + do spawn { unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { + do spawn { unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; } } @@ -990,52 +1036,33 @@ mod test { assert!(task_run_count == 3); } - // Confirm that a sched_id actually is the uint form of the - // pointer to the scheduler struct. - #[test] - fn simple_sched_id_test() { - do run_in_bare_thread { - let sched = ~new_test_uv_sched(); - assert!(to_uint(sched) == sched.sched_id()); - } - } - - // Compare two scheduler ids that are different, this should never - // fail but may catch a mistake someday. - #[test] - fn compare_sched_id_test() { - do run_in_bare_thread { - let sched_one = ~new_test_uv_sched(); - let sched_two = ~new_test_uv_sched(); - assert!(sched_one.sched_id() != sched_two.sched_id()); - } - } - - // A very simple test that confirms that a task executing on the // home scheduler notices that it is home. #[test] fn test_home_sched() { - do run_in_bare_thread { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; + let mut pool = pool(); - let mut sched = ~new_test_uv_sched(); - let sched_handle = sched.make_handle(); + let (dport, dchan) = Chan::new(); + { + let (port, chan) = Chan::new(); + let mut handle1 = pool.spawn_sched(); + let mut handle2 = pool.spawn_sched(); - let mut task = ~do Task::new_root_homed(&mut sched.stack_pool, None, - Sched(sched_handle)) { - unsafe { *task_ran_ptr = true }; - assert!(Task::on_appropriate_sched()); - }; + handle1.send(TaskFromFriend(do pool.task(TaskOpts::new()) { + chan.send(sched_id()); + })); + let sched1_id = port.recv(); - let on_exit: proc(TaskResult) = proc(exit_status) { - rtassert!(exit_status.is_ok()) + let mut task = do pool.task(TaskOpts::new()) { + assert_eq!(sched_id(), sched1_id); + dchan.send(()); }; - task.death.on_exit = Some(on_exit); - - sched.bootstrap(task); + task.give_home(HomeSched(handle1)); + handle2.send(TaskFromFriend(task)); } + dport.recv(); + + pool.shutdown(); } // An advanced test that checks all four possible states that a @@ -1043,12 +1070,13 @@ mod test { #[test] fn test_schedule_home_states() { - use rt::sleeper_list::SleeperList; - use rt::sched::Shutdown; - use borrow; + use sleeper_list::SleeperList; + use super::{Shutdown, Scheduler, SchedHandle}; + use std::unstable::run_in_bare_thread; + use std::rt::thread::Thread; + use std::sync::deque::BufferPool; do run_in_bare_thread { - let sleepers = SleeperList::new(); let mut pool = BufferPool::new(); let (normal_worker, normal_stealer) = pool.deque(); @@ -1057,17 +1085,18 @@ mod test { // Our normal scheduler let mut normal_sched = ~Scheduler::new( + 1, basic::event_loop(), normal_worker, queues.clone(), sleepers.clone()); let normal_handle = normal_sched.make_handle(); - let friend_handle = normal_sched.make_handle(); // Our special scheduler let mut special_sched = ~Scheduler::new_special( + 1, basic::event_loop(), special_worker, queues.clone(), @@ -1086,35 +1115,61 @@ mod test { // 3) task not homed, sched requeues // 4) task not home, send home - let task1 = ~do Task::new_root_homed(&mut 
special_sched.stack_pool, None, - Sched(t1_handle)) || { - rtassert!(Task::on_appropriate_sched()); + // Grab both the scheduler and the task from TLS and check if the + // task is executing on an appropriate scheduler. + fn on_appropriate_sched() -> bool { + use task::{TypeGreen, TypeSched, HomeSched}; + let task = GreenTask::convert(Local::take()); + let sched_id = task.sched.get_ref().sched_id(); + let run_any = task.sched.get_ref().run_anything; + let ret = match task.task_type { + TypeGreen(Some(AnySched)) => { + run_any + } + TypeGreen(Some(HomeSched(SchedHandle { + sched_id: ref id, + .. + }))) => { + *id == sched_id + } + TypeGreen(None) => { fail!("task without home"); } + TypeSched => { fail!("expected green task"); } + }; + task.put(); + ret + } + + let task1 = do GreenTask::new_homed(&mut special_sched.stack_pool, + None, HomeSched(t1_handle)) { + rtassert!(on_appropriate_sched()); }; - rtdebug!("task1 id: **{}**", borrow::to_uint(task1)); - let task2 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); + let task2 = do GreenTask::new(&mut normal_sched.stack_pool, None) { + rtassert!(on_appropriate_sched()); }; - let task3 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); + let task3 = do GreenTask::new(&mut normal_sched.stack_pool, None) { + rtassert!(on_appropriate_sched()); }; - let task4 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None, - Sched(t4_handle)) { - rtassert!(Task::on_appropriate_sched()); + let task4 = do GreenTask::new_homed(&mut special_sched.stack_pool, + None, HomeSched(t4_handle)) { + rtassert!(on_appropriate_sched()); }; - rtdebug!("task4 id: **{}**", borrow::to_uint(task4)); // Signal from the special task that we are done. 
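
For orientation, the four states this test exercises reduce to `process_task`'s routing matrix: a homed task runs only on its home scheduler (and is sent home otherwise), while an unhomed task runs anywhere except on a pinned-only scheduler, which forwards it to its friend. That decision table, distilled into a runnable sketch with hypothetical types:

```rust
enum Home {
    AnySched,
    Sched(usize), // home scheduler id
}

enum Route {
    RunHere,
    SendHome,
    SendToFriend,
}

// The routing matrix from process_task above.
fn route(task_home: Home, sched_id: usize, run_anything: bool) -> Route {
    match task_home {
        Home::Sched(home) if home != sched_id => Route::SendHome,
        Home::Sched(_) => Route::RunHere,
        Home::AnySched if run_anything => Route::RunHere,
        Home::AnySched => Route::SendToFriend,
    }
}

fn main() {
    // The four cases listed in the comments above.
    assert!(matches!(route(Home::Sched(1), 1, false), Route::RunHere));
    assert!(matches!(route(Home::Sched(1), 2, true), Route::SendHome));
    assert!(matches!(route(Home::AnySched, 2, true), Route::RunHere));
    assert!(matches!(route(Home::AnySched, 1, false), Route::SendToFriend));
}
```
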
let (port, chan) = Chan::<()>::new(); - let normal_task = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtdebug!("*about to submit task2*"); - Scheduler::run_task(task2); - rtdebug!("*about to submit task4*"); - Scheduler::run_task(task4); - rtdebug!("*normal_task done*"); + fn run(next: ~GreenTask) { + let mut task = GreenTask::convert(Local::take()); + let sched = task.sched.take_unwrap(); + sched.run_task(task, next) + } + + let normal_task = do GreenTask::new(&mut normal_sched.stack_pool, + None) { + run(task2); + run(task4); port.recv(); let mut nh = normal_handle; nh.send(Shutdown); @@ -1122,29 +1177,23 @@ mod test { sh.send(Shutdown); }; - rtdebug!("normal task: {}", borrow::to_uint(normal_task)); - let special_task = ~do Task::new_root(&mut special_sched.stack_pool, None) { - rtdebug!("*about to submit task1*"); - Scheduler::run_task(task1); - rtdebug!("*about to submit task3*"); - Scheduler::run_task(task3); - rtdebug!("*done with special_task*"); + let special_task = do GreenTask::new(&mut special_sched.stack_pool, + None) { + run(task1); + run(task3); chan.send(()); }; - rtdebug!("special task: {}", borrow::to_uint(special_task)); let normal_sched = normal_sched; let normal_thread = do Thread::start { normal_sched.bootstrap(normal_task); - rtdebug!("finished with normal_thread"); }; let special_sched = special_sched; let special_thread = do Thread::start { special_sched.bootstrap(special_task); - rtdebug!("finished with special_sched"); }; normal_thread.join(); @@ -1152,109 +1201,82 @@ mod test { } } - #[test] - fn test_stress_schedule_task_states() { - if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } - let n = stress_factor() * 120; - for _ in range(0, n as int) { - test_schedule_home_states(); - } - } + //#[test] + //fn test_stress_schedule_task_states() { + // if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } + // let n = stress_factor() * 120; + // for _ in range(0, n as int) { + // test_schedule_home_states(); + // } + //} #[test] fn test_io_callback() { - use io::timer; - - // This is a regression test that when there are no schedulable tasks - // in the work queue, but we are performing I/O, that once we do put - // something in the work queue again the scheduler picks it up and doesn't - // exit before emptying the work queue - do run_in_uv_task { - do spawntask { + use std::io::timer; + + let mut pool = SchedPool::new(PoolConfig { + threads: 2, + event_loop_factory: None, + }); + + // This is a regression test that when there are no schedulable tasks in + // the work queue, but we are performing I/O, that once we do put + // something in the work queue again the scheduler picks it up and + // doesn't exit before emptying the work queue + do pool.spawn(TaskOpts::new()) { + do spawn { timer::sleep(10); } } + + pool.shutdown(); } #[test] - fn handle() { - do run_in_bare_thread { - let (port, chan) = Chan::new(); - - let thread_one = do Thread::start { - let chan = chan; - do run_in_newsched_task_core { - chan.send(()); - } - }; - - let thread_two = do Thread::start { - let port = port; - do run_in_newsched_task_core { - port.recv(); - } - }; + fn wakeup_across_scheds() { + let (port1, chan1) = Chan::new(); + let (port2, chan2) = Chan::new(); + + let mut pool1 = pool(); + let mut pool2 = pool(); + + do pool1.spawn(TaskOpts::new()) { + let id = sched_id(); + chan1.send(()); + port2.recv(); + assert_eq!(id, sched_id()); + } - thread_two.join(); - thread_one.join(); + do pool2.spawn(TaskOpts::new()) { + let id = sched_id(); + 
port1.recv(); + assert_eq!(id, sched_id()); + chan2.send(()); } + + pool1.shutdown(); + pool2.shutdown(); } // A regression test that the final message is always handled. // Used to deadlock because Shutdown was never recvd. #[test] fn no_missed_messages() { - use rt::sleeper_list::SleeperList; - use rt::stack::StackPool; - use rt::sched::{Shutdown, TaskFromFriend}; - - do run_in_bare_thread { - stress_factor().times(|| { - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (worker, stealer) = pool.deque(); - - let mut sched = ~Scheduler::new( - basic::event_loop(), - worker, - ~[stealer], - sleepers.clone()); - - let mut handle = sched.make_handle(); - - let sched = sched; - let thread = do Thread::start { - let mut sched = sched; - let bootstrap_task = - ~Task::new_root(&mut sched.stack_pool, - None, - proc()()); - sched.bootstrap(bootstrap_task); - }; - - let mut stack_pool = StackPool::new(); - let task = ~Task::new_root(&mut stack_pool, None, proc()()); - handle.send(TaskFromFriend(task)); + let mut pool = pool(); - handle.send(Shutdown); - drop(handle); + let task = pool.task(TaskOpts::new(), proc()()); + pool.spawn_sched().send(TaskFromFriend(task)); - thread.join(); - }) - } + pool.shutdown(); } #[test] fn multithreading() { - use num::Times; - use vec::OwnedVector; - use container::Container; - - do run_in_mt_newsched_task { + do run { let mut ports = ~[]; 10.times(|| { let (port, chan) = Chan::new(); - do spawntask_later { + do spawn { chan.send(()); } ports.push(port); @@ -1268,7 +1290,7 @@ mod test { #[test] fn thread_ring() { - do run_in_mt_newsched_task { + do run { let (end_port, end_chan) = Chan::new(); let n_tasks = 10; @@ -1281,14 +1303,14 @@ mod test { let (next_p, ch) = Chan::new(); let imm_i = i; let imm_p = p; - do spawntask_random { + do spawn { roundtrip(imm_i, n_tasks, &imm_p, &ch); }; p = next_p; i += 1; } let p = p; - do spawntask_random { + do spawn { roundtrip(1, n_tasks, &p, &ch1); } @@ -1319,22 +1341,20 @@ mod test { #[test] fn start_closure_dtor() { - use ops::Drop; - // Regression test that the `start` task entrypoint can // contain dtors that use task resources - do run_in_newsched_task { + do run { struct S { field: () } impl Drop for S { fn drop(&mut self) { - let _foo = @0; + let _foo = ~0; } } let s = S { field: () }; - do spawntask { + do spawn { let _ss = &s; } } @@ -1344,52 +1364,120 @@ mod test { #[ignore] #[test] fn dont_starve_1() { - stress_factor().times(|| { - do run_in_mt_newsched_task { - let (port, chan) = Chan::new(); - - // This task should not be able to starve the sender; - // The sender should get stolen to another thread. - do spawntask { - while port.try_recv().is_none() { } - } + let mut pool = SchedPool::new(PoolConfig { + threads: 2, // this must be > 1 + event_loop_factory: Some(basic::event_loop), + }); + do pool.spawn(TaskOpts::new()) { + let (port, chan) = Chan::new(); - chan.send(()); + // This task should not be able to starve the sender; + // The sender should get stolen to another thread. + do spawn { + while port.try_recv().is_none() { } } - }) + + chan.send(()); + } + pool.shutdown(); } #[test] fn dont_starve_2() { - stress_factor().times(|| { - do run_in_newsched_task { - let (port, chan) = Chan::new(); - let (_port2, chan2) = Chan::new(); + do run { + let (port, chan) = Chan::new(); + let (_port2, chan2) = Chan::new(); - // This task should not be able to starve the other task. - // The sends should eventually yield. 
- do spawntask { - while port.try_recv().is_none() { - chan2.send(()); - } + // This task should not be able to starve the other task. + // The sends should eventually yield. + do spawn { + while port.try_recv().is_none() { + chan2.send(()); } - - chan.send(()); } - }) + + chan.send(()); + } } - // Regression test for a logic bug that would cause single-threaded schedulers - // to sleep forever after yielding and stealing another task. + // Regression test for a logic bug that would cause single-threaded + // schedulers to sleep forever after yielding and stealing another task. #[test] fn single_threaded_yield() { - use task::{spawn, spawn_sched, SingleThreaded, deschedule}; - use num::Times; + use std::task::deschedule; + do run { + 5.times(deschedule); + } + } + + #[test] + fn test_spawn_sched_blocking() { + use std::unstable::mutex::Mutex; + + // Testing that a task in one scheduler can block in foreign code + // without affecting other schedulers + for _ in range(0, 20) { + let mut pool = pool(); + let (start_po, start_ch) = Chan::new(); + let (fin_po, fin_ch) = Chan::new(); + + let lock = unsafe { Mutex::new() }; + let lock2 = unsafe { lock.clone() }; + + let mut handle = pool.spawn_sched(); + handle.send(PinnedTask(pool.task(TaskOpts::new(), proc() { + let mut lock = lock2; + unsafe { + lock.lock(); + + start_ch.send(()); + lock.wait(); // block the scheduler thread + lock.signal(); // let them know we have the lock + lock.unlock(); + } + + fin_ch.send(()); + }))); + drop(handle); + + let mut handle = pool.spawn_sched(); + handle.send(TaskFromFriend(pool.task(TaskOpts::new(), proc() { + // Wait until the other task has its lock + start_po.recv(); - do spawn_sched(SingleThreaded) { - 5.times(|| { deschedule(); }) + fn pingpong(po: &Port<int>, ch: &Chan<int>) { + let mut val = 20; + while val > 0 { + val = po.recv(); + ch.try_send(val - 1); + } + } + + let (setup_po, setup_ch) = Chan::new(); + let (parent_po, parent_ch) = Chan::new(); + do spawn { + let (child_po, child_ch) = Chan::new(); + setup_ch.send(child_ch); + pingpong(&child_po, &parent_ch); + }; + + let child_ch = setup_po.recv(); + child_ch.send(20); + pingpong(&parent_po, &child_ch); + unsafe { + let mut lock = lock; + lock.lock(); + lock.signal(); // wakeup waiting scheduler + lock.wait(); // wait for them to grab the lock + lock.unlock(); + lock.destroy(); // now we're guaranteed they have no locks + } + }))); + drop(handle); + + fin_po.recv(); + pool.shutdown(); } - do spawn { } - do spawn { } + } } diff --git a/src/libgreen/simple.rs b/src/libgreen/simple.rs new file mode 100644 index 0000000000000..4f904ee6e6dd9 --- /dev/null +++ b/src/libgreen/simple.rs @@ -0,0 +1,88 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A small module implementing a simple "runtime" used for bootstrapping a rust +//! scheduler pool and then interacting with it.
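
`SimpleTask` below blocks with a lock, an `awoken` flag, and a while loop around `wait()`, which is the standard condvar parking idiom: the flag makes a wakeup that arrives before the sleep stick, and the loop absorbs spurious wakeups. The same idiom in modern std Rust, as a hypothetical `Parker` type:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct Parker {
    lock: Mutex<bool>, // the `awoken` flag
    cvar: Condvar,
}

impl Parker {
    fn new() -> Parker {
        Parker { lock: Mutex::new(false), cvar: Condvar::new() }
    }

    // Block until unpark(). The loop tolerates spurious wakeups, and the
    // flag tolerates unpark() racing ahead of park().
    fn park(&self) {
        let mut awoken = self.lock.lock().unwrap();
        while !*awoken {
            awoken = self.cvar.wait(awoken).unwrap();
        }
        *awoken = false;
    }

    fn unpark(&self) {
        let mut awoken = self.lock.lock().unwrap();
        *awoken = true;
        self.cvar.notify_one();
    }
}

fn main() {
    let p = Arc::new(Parker::new());
    let p2 = p.clone();
    let t = thread::spawn(move || p2.park());
    p.unpark(); // may run before or after park(); either order works
    t.join().unwrap();
}
```
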
+ +use std::cast; +use std::rt::Runtime; +use std::rt::local::Local; +use std::rt::rtio; +use std::rt::task::{Task, BlockedTask}; +use std::task::TaskOpts; +use std::unstable::sync::LittleLock; + +struct SimpleTask { + lock: LittleLock, + awoken: bool, +} + +impl Runtime for SimpleTask { + // Implement the simple tasks of descheduling and rescheduling, but only in + // a small number of cases. + fn deschedule(mut ~self, times: uint, mut cur_task: ~Task, + f: |BlockedTask| -> Result<(), BlockedTask>) { + assert!(times == 1); + + let me = &mut *self as *mut SimpleTask; + let cur_dupe = &*cur_task as *Task; + cur_task.put_runtime(self as ~Runtime); + let task = BlockedTask::block(cur_task); + + // See libnative/task.rs for what's going on here with the `awoken` + // field and the while loop around wait() + unsafe { + let mut guard = (*me).lock.lock(); + (*me).awoken = false; + match f(task) { + Ok(()) => { + while !(*me).awoken { + guard.wait(); + } + } + Err(task) => { cast::forget(task.wake()); } + } + drop(guard); + cur_task = cast::transmute(cur_dupe); + } + Local::put(cur_task); + } + fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) { + let me = &mut *self as *mut SimpleTask; + to_wake.put_runtime(self as ~Runtime); + unsafe { + cast::forget(to_wake); + let _l = (*me).lock.lock(); + (*me).awoken = true; + (*me).lock.signal(); + } + } + + // These functions are all unimplemented and fail as a result. This is on + // purpose. A "simple task" is just that, a very simple task that can't + // really do a whole lot. The only purpose of the task is to get us off our + // feet and running. + fn yield_now(~self, _cur_task: ~Task) { fail!() } + fn maybe_yield(~self, _cur_task: ~Task) { fail!() } + fn spawn_sibling(~self, _cur_task: ~Task, _opts: TaskOpts, _f: proc()) { + fail!() + } + fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> { None } + fn wrap(~self) -> ~Any { fail!() } +} + +pub fn task() -> ~Task { + let mut task = ~Task::new(); + task.put_runtime(~SimpleTask { + lock: LittleLock::new(), + awoken: false, + } as ~Runtime); + return task; +} diff --git a/src/libstd/rt/sleeper_list.rs b/src/libgreen/sleeper_list.rs similarity index 92% rename from src/libstd/rt/sleeper_list.rs rename to src/libgreen/sleeper_list.rs index 39c7431837f6e..5be260efdfaef 100644 --- a/src/libstd/rt/sleeper_list.rs +++ b/src/libgreen/sleeper_list.rs @@ -11,10 +11,9 @@ //! Maintains a shared list of sleeping schedulers. Schedulers //! use this to wake each other up. -use rt::sched::SchedHandle; -use rt::mpmc_bounded_queue::Queue; -use option::*; -use clone::Clone; +use std::sync::mpmc_bounded_queue::Queue; + +use sched::SchedHandle; pub struct SleeperList { priv q: Queue<SchedHandle>, diff --git a/src/libstd/rt/stack.rs b/src/libgreen/stack.rs similarity index 95% rename from src/libstd/rt/stack.rs rename to src/libgreen/stack.rs index 44b60e955d217..cf2a3d5f1414c 100644 --- a/src/libstd/rt/stack.rs +++ b/src/libgreen/stack.rs @@ -8,11 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use container::Container; -use ptr::RawPtr; -use vec; -use ops::Drop; -use libc::{c_uint, uintptr_t}; +use std::vec; +use std::libc::{c_uint, uintptr_t}; pub struct StackSegment { priv buf: ~[u8], diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs new file mode 100644 index 0000000000000..eff80df2a118e --- /dev/null +++ b/src/libgreen/task.rs @@ -0,0 +1,536 @@ +// Copyright 2013 The Rust Project Developers.
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Green Task implementation +//! +//! This module contains the glue to the libstd runtime necessary to integrate +//! M:N scheduling. This GreenTask structure is hidden as a trait object in all +//! rust tasks and virtual calls are made in order to interface with it. +//! +//! Each green task contains a scheduler if it is currently running, and it also +//! contains the rust task itself in order to juggle around ownership of the +//! values. + +use std::cast; +use std::rt::Runtime; +use std::rt::rtio; +use std::rt::local::Local; +use std::rt::task::{Task, BlockedTask}; +use std::task::TaskOpts; +use std::unstable::mutex::Mutex; + +use coroutine::Coroutine; +use sched::{Scheduler, SchedHandle, RunOnce}; +use stack::StackPool; + +/// The necessary fields needed to keep track of a green task (as opposed to a +/// 1:1 task). +pub struct GreenTask { + coroutine: Option, + handle: Option, + sched: Option<~Scheduler>, + task: Option<~Task>, + task_type: TaskType, + pool_id: uint, + + // See the comments in the scheduler about why this is necessary + nasty_deschedule_lock: Mutex, +} + +pub enum TaskType { + TypeGreen(Option), + TypeSched, +} + +pub enum Home { + AnySched, + HomeSched(SchedHandle), +} + +impl GreenTask { + /// Creates a new green task which is not homed to any particular scheduler + /// and will not have any contained Task structure. + pub fn new(stack_pool: &mut StackPool, + stack_size: Option, + start: proc()) -> ~GreenTask { + GreenTask::new_homed(stack_pool, stack_size, AnySched, start) + } + + /// Creates a new task (like `new`), but specifies the home for new task. + pub fn new_homed(stack_pool: &mut StackPool, + stack_size: Option, + home: Home, + start: proc()) -> ~GreenTask { + let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home))); + let start = GreenTask::build_start_wrapper(start, ops.as_uint()); + ops.coroutine = Some(Coroutine::new(stack_pool, stack_size, start)); + return ops; + } + + /// Creates a new green task with the specified coroutine and type, this is + /// useful when creating scheduler tasks. + pub fn new_typed(coroutine: Option, + task_type: TaskType) -> ~GreenTask { + ~GreenTask { + pool_id: 0, + coroutine: coroutine, + task_type: task_type, + sched: None, + handle: None, + nasty_deschedule_lock: unsafe { Mutex::new() }, + task: Some(~Task::new()), + } + } + + /// Creates a new green task with the given configuration options for the + /// contained Task object. The given stack pool is also used to allocate a + /// new stack for this task. + pub fn configure(pool: &mut StackPool, + opts: TaskOpts, + f: proc()) -> ~GreenTask { + let TaskOpts { + watched: _watched, + notify_chan, name, stack_size + } = opts; + + let mut green = GreenTask::new(pool, stack_size, f); + { + let task = green.task.get_mut_ref(); + task.name = name; + match notify_chan { + Some(chan) => { + let on_exit = proc(task_result) { chan.send(task_result) }; + task.death.on_exit = Some(on_exit); + } + None => {} + } + } + return green; + } + + /// Just like the `maybe_take_runtime` function, this function should *not* + /// exist. Usage of this function is _strongly_ discouraged. 
This is an
+    /// absolute last resort necessary for converting a libstd task to a green
+    /// task.
+    ///
+    /// This function will assert that the task is indeed a green task before
+    /// returning (and will kill the entire process if this is wrong).
+    pub fn convert(mut task: ~Task) -> ~GreenTask {
+        match task.maybe_take_runtime::<GreenTask>() {
+            Some(mut green) => {
+                green.put_task(task);
+                green
+            }
+            None => rtabort!("not a green task any more?"),
+        }
+    }
+
+    /// Builds a function which is the actual starting execution point for a
+    /// rust task. This function is the glue necessary to execute the libstd
+    /// task and then clean up the green thread after it exits.
+    ///
+    /// The second argument to this function is actually a transmuted copy of
+    /// the `GreenTask` pointer. Context switches in the scheduler silently
+    /// transfer ownership of the `GreenTask` to the other end of the context
+    /// switch, so because this is the first code that is running in this task,
+    /// it must first re-acquire ownership of the green task.
+    pub fn build_start_wrapper(start: proc(), ops: uint) -> proc() {
+        proc() {
+            // First code after swap to this new context. Run our
+            // cleanup job after we have re-acquired ownership of the green
+            // task.
+            let mut task: ~GreenTask = unsafe { GreenTask::from_uint(ops) };
+            task.sched.get_mut_ref().run_cleanup_job();
+
+            // Convert our green task to a libstd task and then execute the
+            // code requested. This is the "try/catch" block for this green
+            // task and is the wrapper for *all* code run in the task.
+            let mut start = Some(start);
+            let task = task.swap().run(|| start.take_unwrap()());
+
+            // Once the function has exited, it's time to run the termination
+            // routine. This means we need to context switch one more time but
+            // clean ourselves up on the other end. Since we have no way of
+            // preserving a handle to the GreenTask down to this point, this
+            // unfortunately must call `GreenTask::convert`. In order to avoid
+            // this we could add a `terminate` function to the `Runtime` trait
+            // in libstd, but that seems less appropriate since the conversion
+            // method exists.
+            GreenTask::convert(task).terminate();
+        }
+    }
+
+    pub fn give_home(&mut self, new_home: Home) {
+        match self.task_type {
+            TypeGreen(ref mut home) => { *home = Some(new_home); }
+            TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
+        }
+    }
+
+    pub fn take_unwrap_home(&mut self) -> Home {
+        match self.task_type {
+            TypeGreen(ref mut home) => home.take_unwrap(),
+            TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
+        }
+    }
+
+    // New utility functions for homes.
+
+    pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool {
+        match self.task_type {
+            TypeGreen(Some(AnySched)) => { false }
+            TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => {
+                *id == sched.sched_id()
+            }
+            TypeGreen(None) => { rtabort!("task without home"); }
+            TypeSched => {
+                // Awe yea
+                rtabort!("type error: expected: TypeGreen, found: TypeSched");
+            }
+        }
+    }
+
+    pub fn homed(&self) -> bool {
+        match self.task_type {
+            TypeGreen(Some(AnySched)) => { false }
+            TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true }
+            TypeGreen(None) => {
+                rtabort!("task without home");
+            }
+            TypeSched => {
+                rtabort!("type error: expected: TypeGreen, found: TypeSched");
+            }
+        }
+    }
+
+    pub fn is_sched(&self) -> bool {
+        match self.task_type {
+            TypeGreen(..)
=> false, TypeSched => true, + } + } + + // Unsafe functions for transferring ownership of this GreenTask across + // context switches + + pub fn as_uint(&self) -> uint { + unsafe { cast::transmute(self) } + } + + pub unsafe fn from_uint(val: uint) -> ~GreenTask { cast::transmute(val) } + + // Runtime glue functions and helpers + + pub fn put_with_sched(mut ~self, sched: ~Scheduler) { + assert!(self.sched.is_none()); + self.sched = Some(sched); + self.put(); + } + + pub fn put_task(&mut self, task: ~Task) { + assert!(self.task.is_none()); + self.task = Some(task); + } + + pub fn swap(mut ~self) -> ~Task { + let mut task = self.task.take_unwrap(); + task.put_runtime(self as ~Runtime); + return task; + } + + pub fn put(~self) { + assert!(self.sched.is_some()); + Local::put(self.swap()); + } + + fn terminate(mut ~self) { + let sched = self.sched.take_unwrap(); + sched.terminate_current_task(self); + } + + // This function is used to remotely wakeup this green task back on to its + // original pool of schedulers. In order to do so, each tasks arranges a + // SchedHandle upon descheduling to be available for sending itself back to + // the original pool. + // + // Note that there is an interesting transfer of ownership going on here. We + // must relinquish ownership of the green task, but then also send the task + // over the handle back to the original scheduler. In order to safely do + // this, we leverage the already-present "nasty descheduling lock". The + // reason for doing this is that each task will bounce on this lock after + // resuming after a context switch. By holding the lock over the enqueueing + // of the task, we're guaranteed that the SchedHandle's memory will be valid + // for this entire function. + // + // An alternative would include having incredibly cheaply cloneable handles, + // but right now a SchedHandle is something like 6 allocations, so it is + // *not* a cheap operation to clone a handle. Until the day comes that we + // need to optimize this, a lock should do just fine (it's completely + // uncontended except for when the task is rescheduled). + fn reawaken_remotely(mut ~self) { + unsafe { + let mtx = &mut self.nasty_deschedule_lock as *mut Mutex; + let handle = self.handle.get_mut_ref() as *mut SchedHandle; + (*mtx).lock(); + (*handle).send(RunOnce(self)); + (*mtx).unlock(); + } + } +} + +impl Runtime for GreenTask { + fn yield_now(mut ~self, cur_task: ~Task) { + self.put_task(cur_task); + let sched = self.sched.take_unwrap(); + sched.yield_now(self); + } + + fn maybe_yield(mut ~self, cur_task: ~Task) { + self.put_task(cur_task); + let sched = self.sched.take_unwrap(); + sched.maybe_yield(self); + } + + fn deschedule(mut ~self, times: uint, cur_task: ~Task, + f: |BlockedTask| -> Result<(), BlockedTask>) { + self.put_task(cur_task); + let mut sched = self.sched.take_unwrap(); + + // In order for this task to be reawoken in all possible contexts, we + // may need a handle back in to the current scheduler. When we're woken + // up in anything other than the local scheduler pool, this handle is + // used to send this task back into the scheduler pool. + if self.handle.is_none() { + self.handle = Some(sched.make_handle()); + self.pool_id = sched.pool_id; + } + + // This code is pretty standard, except for the usage of + // `GreenTask::convert`. 
Right now if we use `reawaken` directly it will
+        // expect there to be a task in local TLS, but that is not true for
+        // this deschedule block (because the scheduler must retain ownership
+        // of the task while the cleanup job is running). In order to get
+        // around this for now, we invoke the scheduler directly with the
+        // converted Task => GreenTask structure.
+        if times == 1 {
+            sched.deschedule_running_task_and_then(self, |sched, task| {
+                match f(task) {
+                    Ok(()) => {}
+                    Err(t) => {
+                        t.wake().map(|t| {
+                            sched.enqueue_task(GreenTask::convert(t))
+                        });
+                    }
+                }
+            });
+        } else {
+            sched.deschedule_running_task_and_then(self, |sched, task| {
+                for task in task.make_selectable(times) {
+                    match f(task) {
+                        Ok(()) => {},
+                        Err(task) => {
+                            task.wake().map(|t| {
+                                sched.enqueue_task(GreenTask::convert(t))
+                            });
+                            break
+                        }
+                    }
+                }
+            });
+        }
+    }
+
+    fn reawaken(mut ~self, to_wake: ~Task, can_resched: bool) {
+        self.put_task(to_wake);
+        assert!(self.sched.is_none());
+
+        // Waking up a green thread is a bit of a tricky situation. We have no
+        // guarantee about where the current task is running. The options we
+        // have for where this current task is running are:
+        //
+        // 1. Our original scheduler pool
+        // 2. Some other scheduler pool
+        // 3. Something that isn't a scheduler pool
+        //
+        // In order to figure out what case we're in, this is the reason that
+        // the `maybe_take_runtime` function exists. Using this function we can
+        // dynamically check to see which of these cases is the current
+        // situation and then dispatch accordingly.
+        //
+        // In case 1, we just use the local scheduler to resume ourselves
+        // immediately (if a rescheduling is possible).
+        //
+        // In cases 2 and 3, we need to remotely reawaken ourself in order to
+        // be transplanted back to the correct scheduler pool.
+        let mut running_task: ~Task = Local::take();
+        match running_task.maybe_take_runtime::<GreenTask>() {
+            Some(mut running_green_task) => {
+                running_green_task.put_task(running_task);
+                let mut sched = running_green_task.sched.take_unwrap();
+
+                if sched.pool_id == self.pool_id {
+                    if can_resched {
+                        sched.run_task(running_green_task, self);
+                    } else {
+                        sched.enqueue_task(self);
+                        running_green_task.put_with_sched(sched);
+                    }
+                } else {
+                    self.reawaken_remotely();
+
+                    // put that thing back where it came from!
+                    running_green_task.put_with_sched(sched);
+                }
+            }
+            None => {
+                self.reawaken_remotely();
+                Local::put(running_task);
+            }
+        }
+    }
+
+    fn spawn_sibling(mut ~self, cur_task: ~Task, opts: TaskOpts, f: proc()) {
+        self.put_task(cur_task);
+
+        // Spawns a task into the current scheduler. We allocate the new task's
+        // stack from the scheduler's stack pool, and then configure it
+        // according to `opts`. Afterwards we bootstrap it immediately by
+        // switching to it.
+        //
+        // Upon returning, our task is back in TLS and we're good to return.
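The `as_uint`/`from_uint` round-trip above is how ownership of a GreenTask survives a context switch: the box is flattened to a bare word, carried across the switch, and re-materialized on the other side. A minimal sketch of that hand-off in today's Rust, with Box::into_raw/from_raw standing in for the patch's cast::transmute pair (the GreenTask struct here is a stand-in, not the real one):

    // Ownership leaves the type system as a word and comes back intact.
    struct GreenTask { id: usize }

    fn to_word(task: Box<GreenTask>) -> usize {
        Box::into_raw(task) as usize // ownership now lives in the word
    }

    unsafe fn from_word(word: usize) -> Box<GreenTask> {
        // caller must guarantee the word came from to_word and is still live
        Box::from_raw(word as *mut GreenTask)
    }

    fn main() {
        let word = to_word(Box::new(GreenTask { id: 7 }));
        let task = unsafe { from_word(word) };
        assert_eq!(task.id, 7);
    }

The unsafety is concentrated in from_word: whoever holds the word is the de facto owner, exactly the invariant the scheduler's context switches maintain.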
+ let mut sched = self.sched.take_unwrap(); + let sibling = GreenTask::configure(&mut sched.stack_pool, opts, f); + sched.run_task(self, sibling) + } + + // Local I/O is provided by the scheduler's event loop + fn local_io<'a>(&'a mut self) -> Option> { + match self.sched.get_mut_ref().event_loop.io() { + Some(io) => Some(rtio::LocalIo::new(io)), + None => None, + } + } + + fn wrap(~self) -> ~Any { self as ~Any } +} + +impl Drop for GreenTask { + fn drop(&mut self) { + unsafe { self.nasty_deschedule_lock.destroy(); } + } +} + +#[cfg(test)] +mod tests { + use std::rt::Runtime; + use std::rt::local::Local; + use std::rt::task::Task; + use std::task; + use std::task::TaskOpts; + + use super::super::{PoolConfig, SchedPool}; + use super::GreenTask; + + fn spawn_opts(opts: TaskOpts, f: proc()) { + let mut pool = SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: None, + }); + pool.spawn(opts, f); + pool.shutdown(); + } + + #[test] + fn smoke() { + let (p, c) = Chan::new(); + do spawn_opts(TaskOpts::new()) { + c.send(()); + } + p.recv(); + } + + #[test] + fn smoke_fail() { + let (p, c) = Chan::<()>::new(); + do spawn_opts(TaskOpts::new()) { + let _c = c; + fail!() + } + assert_eq!(p.recv_opt(), None); + } + + #[test] + fn smoke_opts() { + let mut opts = TaskOpts::new(); + opts.name = Some(SendStrStatic("test")); + opts.stack_size = Some(20 * 4096); + let (p, c) = Chan::new(); + opts.notify_chan = Some(c); + spawn_opts(opts, proc() {}); + assert!(p.recv().is_ok()); + } + + #[test] + fn smoke_opts_fail() { + let mut opts = TaskOpts::new(); + let (p, c) = Chan::new(); + opts.notify_chan = Some(c); + spawn_opts(opts, proc() { fail!() }); + assert!(p.recv().is_err()); + } + + #[test] + fn yield_test() { + let (p, c) = Chan::new(); + do spawn_opts(TaskOpts::new()) { + 10.times(task::deschedule); + c.send(()); + } + p.recv(); + } + + #[test] + fn spawn_children() { + let (p, c) = Chan::new(); + do spawn_opts(TaskOpts::new()) { + let (p, c2) = Chan::new(); + do spawn { + let (p, c3) = Chan::new(); + do spawn { + c3.send(()); + } + p.recv(); + c2.send(()); + } + p.recv(); + c.send(()); + } + p.recv(); + } + + #[test] + fn spawn_inherits() { + let (p, c) = Chan::new(); + do spawn_opts(TaskOpts::new()) { + let c = c; + do spawn { + let mut task: ~Task = Local::take(); + match task.maybe_take_runtime::() { + Some(ops) => { + task.put_runtime(ops as ~Runtime); + } + None => fail!(), + } + Local::put(task); + c.send(()); + } + } + p.recv(); + } +} diff --git a/src/libstd/io/native/file.rs b/src/libnative/io/file.rs similarity index 97% rename from src/libstd/io/native/file.rs rename to src/libnative/io/file.rs index de2655303d606..c1a378c7e3cc1 100644 --- a/src/libstd/io/native/file.rs +++ b/src/libnative/io/file.rs @@ -10,28 +10,21 @@ //! 
Blocking posix-based file I/O -#[allow(non_camel_case_types)]; - -use c_str::CString; -use io::IoError; -use io; -use libc::c_int; -use libc; -use ops::Drop; -use option::{Some, None, Option}; -use os; -use path::{Path, GenericPath}; -use ptr::RawPtr; -use result::{Result, Ok, Err}; -use rt::rtio; +use std::c_str::CString; +use std::io::IoError; +use std::io; +use std::libc::c_int; +use std::libc; +use std::os; +use std::rt::rtio; +use std::unstable::intrinsics; +use std::vec; + use super::IoResult; -use unstable::intrinsics; -use vec::ImmutableVector; -use vec; -#[cfg(windows)] use os::win32::{as_utf16_p, fill_utf16_buf_and_decode}; -#[cfg(windows)] use ptr; -#[cfg(windows)] use str; +#[cfg(windows)] use std::os::win32::{as_utf16_p, fill_utf16_buf_and_decode}; +#[cfg(windows)] use std::ptr; +#[cfg(windows)] use std::str; fn keep_going(data: &[u8], f: |*u8, uint| -> i64) -> i64 { #[cfg(windows)] static eintr: int = 0; // doesn't matter @@ -490,8 +483,8 @@ pub fn readdir(p: &CString) -> IoResult<~[Path]> { unsafe { #[cfg(not(windows))] unsafe fn get_list(p: &CString) -> IoResult<~[Path]> { - use libc::{dirent_t}; - use libc::{opendir, readdir, closedir}; + use std::libc::{dirent_t}; + use std::libc::{opendir, readdir, closedir}; extern { fn rust_list_dir_val(ptr: *dirent_t) -> *libc::c_char; } @@ -517,14 +510,14 @@ pub fn readdir(p: &CString) -> IoResult<~[Path]> { #[cfg(windows)] unsafe fn get_list(p: &CString) -> IoResult<~[Path]> { - use libc::consts::os::extra::INVALID_HANDLE_VALUE; - use libc::{wcslen, free}; - use libc::funcs::extra::kernel32::{ + use std::libc::consts::os::extra::INVALID_HANDLE_VALUE; + use std::libc::{wcslen, free}; + use std::libc::funcs::extra::kernel32::{ FindFirstFileW, FindNextFileW, FindClose, }; - use libc::types::os::arch::extra::HANDLE; + use std::libc::types::os::arch::extra::HANDLE; use os::win32::{ as_utf16_p }; @@ -906,12 +899,11 @@ pub fn utime(p: &CString, atime: u64, mtime: u64) -> IoResult<()> { #[cfg(test)] mod tests { - use io::native::file::{CFile, FileDesc}; - use io; - use libc; - use os; - use result::Ok; - use rt::rtio::RtioFileStream; + use super::{CFile, FileDesc}; + use std::io; + use std::libc; + use std::os; + use std::rt::rtio::RtioFileStream; #[ignore(cfg(target_os = "freebsd"))] // hmm, maybe pipes have a tiny buffer #[test] diff --git a/src/libstd/io/native/mod.rs b/src/libnative/io/mod.rs similarity index 92% rename from src/libstd/io/native/mod.rs rename to src/libnative/io/mod.rs index d9dccc84f1c45..32056215e7c97 100644 --- a/src/libstd/io/native/mod.rs +++ b/src/libnative/io/mod.rs @@ -21,24 +21,21 @@ //! play. The only dependencies of these modules are the normal system libraries //! that you would find on the respective platform. 
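The `keep_going` helper above loops a syscall until the whole buffer is consumed, treating EINTR as "try again" (on windows the patch stubs `eintr` out). The same shape in today's Rust, sketched against std::io::Write rather than a raw fd:

    use std::io::{self, Write};

    // Retry partial and interrupted writes until `data` is fully written.
    fn keep_going<W: Write>(mut data: &[u8], out: &mut W) -> io::Result<()> {
        while !data.is_empty() {
            match out.write(data) {
                Ok(0) => return Err(io::ErrorKind::WriteZero.into()),
                Ok(n) => data = &data[n..],
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }

    fn main() {
        keep_going(b"hello\n", &mut io::stdout()).unwrap();
    }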
-use c_str::CString; -use comm::SharedChan; -use libc::c_int; -use libc; -use option::{Option, None, Some}; -use os; -use path::Path; -use result::{Result, Ok, Err}; -use rt::rtio; -use rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket, RtioUnixListener, - RtioPipe, RtioFileStream, RtioProcess, RtioSignal, RtioTTY, - CloseBehavior, RtioTimer}; -use io; -use io::IoError; -use io::net::ip::SocketAddr; -use io::process::ProcessConfig; -use io::signal::Signum; -use ai = io::net::addrinfo; +use std::c_str::CString; +use std::comm::SharedChan; +use std::libc::c_int; +use std::libc; +use std::os; +use std::rt::rtio; +use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket, + RtioUnixListener, RtioPipe, RtioFileStream, RtioProcess, + RtioSignal, RtioTTY, CloseBehavior, RtioTimer}; +use std::io; +use std::io::IoError; +use std::io::net::ip::SocketAddr; +use std::io::process::ProcessConfig; +use std::io::signal::Signum; +use ai = std::io::net::addrinfo; // Local re-exports pub use self::file::FileDesc; @@ -223,6 +220,3 @@ impl rtio::IoFactory for IoFactory { Err(unimpl()) } } - -pub static mut NATIVE_IO_FACTORY: IoFactory = IoFactory; - diff --git a/src/libstd/io/native/process.rs b/src/libnative/io/process.rs similarity index 95% rename from src/libstd/io/native/process.rs rename to src/libnative/io/process.rs index ef972dc4d0ad1..64ce9d7e3482d 100644 --- a/src/libstd/io/native/process.rs +++ b/src/libnative/io/process.rs @@ -8,18 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use io; -use libc::{pid_t, c_void, c_int}; -use libc; -use os; -use prelude::*; -use ptr; -use rt::rtio; -use super::file; -#[cfg(windows)] -use cast; +use std::io; +use std::libc::{pid_t, c_void, c_int}; +use std::libc; +use std::os; +use std::ptr; +use std::rt::rtio; +use p = std::io::process; -use p = io::process; +#[cfg(windows)] use std::cast; + +use super::file; /** * A value representing a child process. @@ -179,22 +178,22 @@ fn spawn_process_os(prog: &str, args: &[~str], env: Option<~[(~str, ~str)]>, dir: Option<&Path>, in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult { - use libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO}; - use libc::consts::os::extra::{ + use std::libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO}; + use std::libc::consts::os::extra::{ TRUE, FALSE, STARTF_USESTDHANDLES, INVALID_HANDLE_VALUE, DUPLICATE_SAME_ACCESS }; - use libc::funcs::extra::kernel32::{ + use std::libc::funcs::extra::kernel32::{ GetCurrentProcess, DuplicateHandle, CloseHandle, CreateProcessA }; - use libc::funcs::extra::msvcrt::get_osfhandle; + use std::libc::funcs::extra::msvcrt::get_osfhandle; - use mem; + use std::mem; unsafe { @@ -256,10 +255,10 @@ fn spawn_process_os(prog: &str, args: &[~str], fail!("failure in CreateProcess: {}", *msg); } - // We close the thread handle because we don't care about keeping the + // We close the thread handle because std::we don't care about keeping the // thread id valid, and we aren't keeping the thread handle around to be // able to close it later. We don't close the process handle however - // because we want the process id to stay valid at least until the + // because std::we want the process id to stay valid at least until the // calling code closes the process handle. 
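What `spawn_process_os` builds by hand in this file (DuplicateHandle plus CreateProcessA here, fork/exec in the unix half further down) is what std::process::Command packages up today. A hedged modern equivalent of spawning with inherited stdio and collecting the exit status, for orientation before the CloseHandle call just below:

    use std::process::{Command, Stdio};

    fn main() {
        let status = Command::new("echo")
            .arg("hello")
            .stdout(Stdio::inherit()) // hand the child our stdout, like the fd dup dance
            .status()
            .expect("failed to spawn child");
        println!("child exited with {:?}", status.code());
    }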
CloseHandle(pi.hThread); @@ -362,8 +361,8 @@ fn spawn_process_os(prog: &str, args: &[~str], env: Option<~[(~str, ~str)]>, dir: Option<&Path>, in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult { - use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp}; - use libc::funcs::bsd44::getdtablesize; + use std::libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp}; + use std::libc::funcs::bsd44::getdtablesize; mod rustrt { extern { @@ -433,7 +432,7 @@ fn spawn_process_os(prog: &str, args: &[~str], #[cfg(unix)] fn with_argv(prog: &str, args: &[~str], cb: |**libc::c_char| -> T) -> T { - use vec; + use std::vec; // We can't directly convert `str`s into `*char`s, as someone needs to hold // a reference to the intermediary byte buffers. So first build an array to @@ -459,7 +458,7 @@ fn with_argv(prog: &str, args: &[~str], cb: |**libc::c_char| -> T) -> T { #[cfg(unix)] fn with_envp(env: Option<~[(~str, ~str)]>, cb: |*c_void| -> T) -> T { - use vec; + use std::vec; // On posixy systems we can pass a char** for envp, which is a // null-terminated array of "k=v\n" strings. Like `with_argv`, we have to @@ -540,8 +539,8 @@ fn waitpid(pid: pid_t) -> int { #[cfg(windows)] fn waitpid_os(pid: pid_t) -> int { - use libc::types::os::arch::extra::DWORD; - use libc::consts::os::extra::{ + use std::libc::types::os::arch::extra::DWORD; + use std::libc::consts::os::extra::{ SYNCHRONIZE, PROCESS_QUERY_INFORMATION, FALSE, @@ -549,7 +548,7 @@ fn waitpid(pid: pid_t) -> int { INFINITE, WAIT_FAILED }; - use libc::funcs::extra::kernel32::{ + use std::libc::funcs::extra::kernel32::{ OpenProcess, GetExitCodeProcess, CloseHandle, @@ -585,7 +584,7 @@ fn waitpid(pid: pid_t) -> int { #[cfg(unix)] fn waitpid_os(pid: pid_t) -> int { - use libc::funcs::posix01::wait::*; + use std::libc::funcs::posix01::wait; #[cfg(target_os = "linux")] #[cfg(target_os = "android")] @@ -612,7 +611,7 @@ fn waitpid(pid: pid_t) -> int { } let mut status = 0 as c_int; - if unsafe { waitpid(pid, &mut status, 0) } == -1 { + if unsafe { wait::waitpid(pid, &mut status, 0) } == -1 { fail!("failure in waitpid: {}", os::last_os_error()); } diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs new file mode 100644 index 0000000000000..e066659265149 --- /dev/null +++ b/src/libnative/lib.rs @@ -0,0 +1,94 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The native runtime crate +//! +//! This crate contains an implementation of 1:1 scheduling for a "native" +//! runtime. In addition, all I/O provided by this crate is the thread blocking +//! version of I/O. 
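The crate doc above is the whole design: in the 1:1 model each task is exactly one OS thread, so blocking a task blocks only its thread, and stack overflow detection needs the thread's stack bounds recorded by hand (as `spawn_opts` in task.rs does below). A minimal modern sketch of both halves, using thread::Builder where the patch uses Thread::spawn_stack; the 1024-byte slack mirrors the patch's allowance for stack already consumed:

    use std::thread;

    fn main() {
        let stack_size = 64 * 1024;
        thread::Builder::new()
            .stack_size(stack_size) // one OS thread per task, sized explicitly
            .spawn(move || {
                // a local near the top of the fresh stack approximates the
                // upper bound; the requested size gives the lower bound
                let marker = 0u8;
                let top = &marker as *const u8 as usize;
                let bottom = top - stack_size + 1024;
                println!("task stack spans roughly {:#x}..{:#x}", bottom, top);
            })
            .unwrap()
            .join()
            .unwrap();
    }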
+
+#[pkgid = "native#0.9-pre"];
+#[crate_id = "native#0.9-pre"];
+#[license = "MIT/ASL2"];
+#[crate_type = "rlib"];
+#[crate_type = "dylib"];
+
+// Allow check-stage0-native for now
+#[cfg(stage0, test)] extern mod green;
+
+// NB this crate explicitly does *not* allow glob imports, please seriously
+// consider whether they're needed before adding that feature here (the
+// answer is that you don't need them)
+
+use std::os;
+use std::rt::local::Local;
+use std::rt::task::Task;
+use std::rt;
+
+pub mod io;
+pub mod task;
+
+
+// XXX: this should not exist here
+#[cfg(stage0)]
+#[lang = "start"]
+pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int {
+    use std::cast;
+    use std::task;
+
+    do start(argc, argv) {
+        // Instead of invoking main directly on this thread, invoke it on
+        // another spawned thread whose stack size we are guaranteed to know.
+        // Currently, we do not have a method of figuring out the size of the
+        // main thread's stack, so for stack overflow detection to work we
+        // must spawn the task in a subtask whose stack size we know.
+        let main: extern "Rust" fn() = unsafe { cast::transmute(main) };
+        let mut task = task::task();
+        task.name("<main>");
+        match do task.try { main() } {
+            Ok(()) => { os::set_exit_status(0); }
+            Err(..) => { os::set_exit_status(rt::DEFAULT_ERROR_CODE); }
+        }
+    }
+}
+
+/// Executes the given procedure after initializing the runtime with the given
+/// argc/argv.
+///
+/// This procedure is guaranteed to run on the thread calling this function,
+/// but the stack bounds for this rust task will *not* be set. Care must be
+/// taken for this function to not overflow its stack.
+///
+/// This function will only return once *all* native threads in the system have
+/// exited.
+pub fn start(argc: int, argv: **u8, main: proc()) -> int {
+    rt::init(argc, argv);
+    let mut exit_code = None;
+    let mut main = Some(main);
+    task::new().run(|| {
+        exit_code = Some(run(main.take_unwrap()));
+    });
+    unsafe { rt::cleanup(); }
+    return exit_code.unwrap();
+}
+
+/// Executes a procedure on the current thread in a Rust task context.
+///
+/// This function has all of the same details as `start` except for a different
+/// number of arguments.
+pub fn run(main: proc()) -> int {
+    // Run the main procedure and then wait for everything to finish
+    main();
+    unsafe {
+        let mut task = Local::borrow(None::<Task>);
+        task.get().wait_for_other_tasks();
+    }
+    os::get_exit_status()
+}
diff --git a/src/libnative/task.rs b/src/libnative/task.rs
new file mode 100644
index 0000000000000..12e361d8041c8
--- /dev/null
+++ b/src/libnative/task.rs
@@ -0,0 +1,330 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Tasks implemented on top of OS threads
+//!
+//! This module contains the implementation of the 1:1 threading model
+//! required by rust tasks. This implements the necessary API traits laid out
+//! by std::rt in order to spawn new tasks and deschedule the current task.
+
+use std::cast;
+use std::rt::env;
+use std::rt::local::Local;
+use std::rt::rtio;
+use std::rt::task::{Task, BlockedTask};
+use std::rt::thread::Thread;
+use std::rt;
+use std::task::TaskOpts;
+use std::unstable::mutex::Mutex;
+use std::unstable::stack;
+
+use io;
+use task;
+
+/// Creates a new Task which is ready to execute as a 1:1 task.
+pub fn new() -> ~Task {
+    let mut task = ~Task::new();
+    task.put_runtime(~Ops {
+        lock: unsafe { Mutex::new() },
+        awoken: false,
+    } as ~rt::Runtime);
+    return task;
+}
+
+/// Spawns a function with the default configuration
+pub fn spawn(f: proc()) {
+    spawn_opts(TaskOpts::new(), f)
+}
+
+/// Spawns a new task given the configuration options and a procedure to run
+/// inside the task.
+pub fn spawn_opts(opts: TaskOpts, f: proc()) {
+    let TaskOpts {
+        watched: _watched,
+        notify_chan, name, stack_size
+    } = opts;
+
+    let mut task = new();
+    task.name = name;
+    match notify_chan {
+        Some(chan) => {
+            let on_exit = proc(task_result) { chan.send(task_result) };
+            task.death.on_exit = Some(on_exit);
+        }
+        None => {}
+    }
+
+    let stack = stack_size.unwrap_or(env::min_stack());
+    let task = task;
+
+    // Spawning a new OS thread guarantees that __morestack will never get
+    // triggered, but we must manually set up the actual stack bounds once this
+    // function starts executing.
This raises the lower limit by a bit because + // by the time that this function is executing we've already consumed at + // least a little bit of stack (we don't know the exact byte address at + // which our stack started). + Thread::spawn_stack(stack, proc() { + let something_around_the_top_of_the_stack = 1; + let addr = &something_around_the_top_of_the_stack as *int; + unsafe { + let my_stack = addr as uint; + stack::record_stack_bounds(my_stack - stack + 1024, my_stack); + } + + let mut f = Some(f); + task.run(|| { f.take_unwrap()() }); + }) +} + +// This structure is the glue between channels and the 1:1 scheduling mode. This +// structure is allocated once per task. +struct Ops { + lock: Mutex, // native synchronization + awoken: bool, // used to prevent spurious wakeups +} + +impl rt::Runtime for Ops { + fn yield_now(~self, mut cur_task: ~Task) { + // put the task back in TLS and then invoke the OS thread yield + cur_task.put_runtime(self as ~rt::Runtime); + Local::put(cur_task); + Thread::yield_now(); + } + + fn maybe_yield(~self, mut cur_task: ~Task) { + // just put the task back in TLS, on OS threads we never need to + // opportunistically yield b/c the OS will do that for us (preemption) + cur_task.put_runtime(self as ~rt::Runtime); + Local::put(cur_task); + } + + fn wrap(~self) -> ~Any { + self as ~Any + } + + // This function gets a little interesting. There are a few safety and + // ownership violations going on here, but this is all done in the name of + // shared state. Additionally, all of the violations are protected with a + // mutex, so in theory there are no races. + // + // The first thing we need to do is to get a pointer to the task's internal + // mutex. This address will not be changing (because the task is allocated + // on the heap). We must have this handle separately because the task will + // have its ownership transferred to the given closure. We're guaranteed, + // however, that this memory will remain valid because *this* is the current + // task's execution thread. + // + // The next weird part is where ownership of the task actually goes. We + // relinquish it to the `f` blocking function, but upon returning this + // function needs to replace the task back in TLS. There is no communication + // from the wakeup thread back to this thread about the task pointer, and + // there's really no need to. In order to get around this, we cast the task + // to a `uint` which is then used at the end of this function to cast back + // to a `~Task` object. Naturally, this looks like it violates ownership + // semantics in that there may be two `~Task` objects. + // + // The fun part is that the wakeup half of this implementation knows to + // "forget" the task on the other end. This means that the awakening half of + // things silently relinquishes ownership back to this thread, but not in a + // way that the compiler can understand. The task's memory is always valid + // for both tasks because these operations are all done inside of a mutex. + // + // You'll also find that if blocking fails (the `f` function hands the + // BlockedTask back to us), we will `cast::forget` the handles. The + // reasoning for this is the same logic as above in that the task silently + // transfers ownership via the `uint`, not through normal compiler + // semantics. + // + // On a mildly unrelated note, it should also be pointed out that OS + // condition variables are susceptible to spurious wakeups, which we need to + // be ready for. 
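Spurious wakeups are exactly what the `awoken` flag guards against, and the comment picks this thread up again just below: the sleeper re-checks the flag in a loop around wait(). In today's Rust the same protocol, with std::sync::{Mutex, Condvar} standing in for the patch's raw `unstable::mutex::Mutex`, looks like:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        // (lock, condvar) pair; the bool plays the role of `awoken`
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let waker = pair.clone();

        let t = thread::spawn(move || {
            let (lock, cvar) = &*waker;
            *lock.lock().unwrap() = true; // awoken = true
            cvar.notify_one();            // lock.signal()
        });

        let (lock, cvar) = &*pair;
        let mut awoken = lock.lock().unwrap();
        while !*awoken {
            // a spurious return from wait() simply re-checks the flag
            awoken = cvar.wait(awoken).unwrap();
        }
        t.join().unwrap();
    }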
In order to accommodate this fact, we have an extra
+    // `awoken` field which indicates whether we were actually woken up via
+    // some invocation of `reawaken`. This flag is only ever accessed inside
+    // the lock, so there's no need to make it atomic.
+    fn deschedule(mut ~self, times: uint, mut cur_task: ~Task,
+                  f: |BlockedTask| -> Result<(), BlockedTask>) {
+        let me = &mut *self as *mut Ops;
+        cur_task.put_runtime(self as ~rt::Runtime);
+
+        unsafe {
+            let cur_task_dupe = *cast::transmute::<&~Task, &uint>(&cur_task);
+            let task = BlockedTask::block(cur_task);
+
+            if times == 1 {
+                (*me).lock.lock();
+                (*me).awoken = false;
+                match f(task) {
+                    Ok(()) => {
+                        while !(*me).awoken {
+                            (*me).lock.wait();
+                        }
+                    }
+                    Err(task) => { cast::forget(task.wake()); }
+                }
+                (*me).lock.unlock();
+            } else {
+                let mut iter = task.make_selectable(times);
+                (*me).lock.lock();
+                (*me).awoken = false;
+                let success = iter.all(|task| {
+                    match f(task) {
+                        Ok(()) => true,
+                        Err(task) => {
+                            cast::forget(task.wake());
+                            false
+                        }
+                    }
+                });
+                while success && !(*me).awoken {
+                    (*me).lock.wait();
+                }
+                (*me).lock.unlock();
+            }
+            // re-acquire ownership of the task
+            cur_task = cast::transmute::<uint, ~Task>(cur_task_dupe);
+        }
+
+        // put the task back in TLS, and everything is as it once was.
+        Local::put(cur_task);
+    }
+
+    // See the comments on `deschedule` for why the task is forgotten here, and
+    // why it's valid to do so.
+    fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) {
+        unsafe {
+            let me = &mut *self as *mut Ops;
+            to_wake.put_runtime(self as ~rt::Runtime);
+            cast::forget(to_wake);
+            (*me).lock.lock();
+            (*me).awoken = true;
+            (*me).lock.signal();
+            (*me).lock.unlock();
+        }
+    }
+
+    fn spawn_sibling(~self, mut cur_task: ~Task, opts: TaskOpts, f: proc()) {
+        cur_task.put_runtime(self as ~rt::Runtime);
+        Local::put(cur_task);
+
+        task::spawn_opts(opts, f);
+    }
+
+    fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> {
+        static mut io: io::IoFactory = io::IoFactory;
+        // Unsafety is from accessing `io`, which is guaranteed to be safe
+        // because you can't do anything usable with this statically
+        // initialized unit struct.
+ Some(unsafe { rtio::LocalIo::new(&mut io as &mut rtio::IoFactory) }) + } +} + +impl Drop for Ops { + fn drop(&mut self) { + unsafe { self.lock.destroy() } + } +} + +#[cfg(test)] +mod tests { + use std::rt::Runtime; + use std::rt::local::Local; + use std::rt::task::Task; + use std::task; + use std::task::TaskOpts; + use super::{spawn, spawn_opts, Ops}; + + #[test] + fn smoke() { + let (p, c) = Chan::new(); + do spawn { + c.send(()); + } + p.recv(); + } + + #[test] + fn smoke_fail() { + let (p, c) = Chan::<()>::new(); + do spawn { + let _c = c; + fail!() + } + assert_eq!(p.recv_opt(), None); + } + + #[test] + fn smoke_opts() { + let mut opts = TaskOpts::new(); + opts.name = Some(SendStrStatic("test")); + opts.stack_size = Some(20 * 4096); + let (p, c) = Chan::new(); + opts.notify_chan = Some(c); + spawn_opts(opts, proc() {}); + assert!(p.recv().is_ok()); + } + + #[test] + fn smoke_opts_fail() { + let mut opts = TaskOpts::new(); + let (p, c) = Chan::new(); + opts.notify_chan = Some(c); + spawn_opts(opts, proc() { fail!() }); + assert!(p.recv().is_err()); + } + + #[test] + fn yield_test() { + let (p, c) = Chan::new(); + do spawn { + 10.times(task::deschedule); + c.send(()); + } + p.recv(); + } + + #[test] + fn spawn_children() { + let (p, c) = Chan::new(); + do spawn { + let (p, c2) = Chan::new(); + do spawn { + let (p, c3) = Chan::new(); + do spawn { + c3.send(()); + } + p.recv(); + c2.send(()); + } + p.recv(); + c.send(()); + } + p.recv(); + } + + #[test] + fn spawn_inherits() { + let (p, c) = Chan::new(); + do spawn { + let c = c; + do spawn { + let mut task: ~Task = Local::take(); + match task.maybe_take_runtime::() { + Some(ops) => { + task.put_runtime(ops as ~Runtime); + } + None => fail!(), + } + Local::put(task); + c.send(()); + } + } + p.recv(); + } +} diff --git a/src/librustc/back/link.rs b/src/librustc/back/link.rs index 0cf91fbba0e88..214f60291feae 100644 --- a/src/librustc/back/link.rs +++ b/src/librustc/back/link.rs @@ -333,6 +333,10 @@ pub mod write { } unsafe fn configure_llvm(sess: Session) { + use std::unstable::mutex::{MUTEX_INIT, Mutex}; + static mut LOCK: Mutex = MUTEX_INIT; + static mut CONFIGURED: bool = false; + // Copy what clan does by turning on loop vectorization at O2 and // slp vectorization at O3 let vectorize_loop = !sess.no_vectorize_loops() && @@ -360,7 +364,13 @@ pub mod write { add(*arg); } - llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr()); + LOCK.lock(); + if !CONFIGURED { + llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, + llvm_args.as_ptr()); + CONFIGURED = true; + } + LOCK.unlock(); } unsafe fn populate_llvm_passes(fpm: lib::llvm::PassManagerRef, diff --git a/src/librustc/front/std_inject.rs b/src/librustc/front/std_inject.rs index a40f8183e1904..1503e4effeb36 100644 --- a/src/librustc/front/std_inject.rs +++ b/src/librustc/front/std_inject.rs @@ -70,6 +70,15 @@ impl fold::ast_fold for StandardLibraryInjector { }]; if use_uv(&crate) && !*self.sess.building_library { + vis.push(ast::view_item { + node: ast::view_item_extern_mod(self.sess.ident_of("green"), + None, + ~[vers_item], + ast::DUMMY_NODE_ID), + attrs: ~[], + vis: ast::private, + span: dummy_sp() + }); vis.push(ast::view_item { node: ast::view_item_extern_mod(self.sess.ident_of("rustuv"), None, diff --git a/src/librustc/middle/trans/debuginfo.rs b/src/librustc/middle/trans/debuginfo.rs index a77e8f764f38d..61fadb7e23654 100644 --- a/src/librustc/middle/trans/debuginfo.rs +++ b/src/librustc/middle/trans/debuginfo.rs @@ -146,7 +146,7 @@ use 
std::hashmap::HashMap; use std::hashmap::HashSet; use std::libc::{c_uint, c_ulonglong, c_longlong}; use std::ptr; -use std::unstable::atomics; +use std::sync::atomics; use std::vec; use syntax::codemap::{Span, Pos}; use syntax::{ast, codemap, ast_util, ast_map, opt_vec}; diff --git a/src/librustpkg/path_util.rs b/src/librustpkg/path_util.rs index a6b1088335c63..0d927fc36aece 100644 --- a/src/librustpkg/path_util.rs +++ b/src/librustpkg/path_util.rs @@ -390,7 +390,7 @@ pub fn mk_output_path(what: OutputType, where: Target, Bench => "bench", _ => "" }, - os::EXE_SUFFIX)) + os::consts::EXE_SUFFIX)) }; if !output_path.is_absolute() { output_path = os::getcwd().join(&output_path); diff --git a/src/librustpkg/tests.rs b/src/librustpkg/tests.rs index ecf08df5f185a..21f18eda1409d 100644 --- a/src/librustpkg/tests.rs +++ b/src/librustpkg/tests.rs @@ -487,8 +487,9 @@ fn lib_output_file_name(workspace: &Path, short_name: &str) -> Path { } fn output_file_name(workspace: &Path, short_name: ~str) -> Path { - target_build_dir(workspace).join(short_name.as_slice()).join(format!("{}{}", short_name, - os::EXE_SUFFIX)) + target_build_dir(workspace).join(short_name.as_slice()) + .join(format!("{}{}", short_name, + os::consts::EXE_SUFFIX)) } #[cfg(target_os = "linux")] @@ -1353,7 +1354,7 @@ fn test_import_rustpkg() { command_line_test([~"build", ~"foo"], workspace); debug!("workspace = {}", workspace.display()); assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}", - os::EXE_SUFFIX)).exists()); + os::consts::EXE_SUFFIX)).exists()); } #[test] @@ -1366,7 +1367,7 @@ fn test_macro_pkg_script() { command_line_test([~"build", ~"foo"], workspace); debug!("workspace = {}", workspace.display()); assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}", - os::EXE_SUFFIX)).exists()); + os::consts::EXE_SUFFIX)).exists()); } #[test] diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 04c1c5a9fb8c9..f6fad524b5c68 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -11,12 +11,10 @@ use ai = std::io::net::addrinfo; use std::libc::c_int; use std::ptr::null; -use std::rt::BlockedTask; -use std::rt::local::Local; -use std::rt::sched::Scheduler; +use std::rt::task::BlockedTask; use net; -use super::{Loop, UvError, Request, wait_until_woken_after}; +use super::{Loop, UvError, Request, wait_until_woken_after, wakeup}; use uvll; struct Addrinfo { @@ -108,8 +106,7 @@ impl GetAddrInfoRequest { cx.status = status; cx.addrinfo = Some(Addrinfo { handle: res }); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(cx.slot.take_unwrap()); + wakeup(&mut cx.slot); } } } @@ -188,12 +185,13 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { #[cfg(test, not(target_os="android"))] mod test { use std::io::net::ip::{SocketAddr, Ipv4Addr}; - use super::*; use super::super::local_loop; + use super::GetAddrInfoRequest; #[test] fn getaddrinfo_test() { - match GetAddrInfoRequest::run(local_loop(), Some("localhost"), None, None) { + let loop_ = &mut local_loop().loop_; + match GetAddrInfoRequest::run(loop_, Some("localhost"), None, None) { Ok(infos) => { let mut found_local = false; let local_addr = &SocketAddr { @@ -211,9 +209,10 @@ mod test { #[test] fn issue_10663() { + let loop_ = &mut local_loop().loop_; // Something should happen here, but this certainly shouldn't cause // everything to die. The actual outcome we don't care too much about. 
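The configure_llvm change earlier in this patch guards a one-time global initialization with a static Mutex plus a CONFIGURED flag. The modern spelling of that run-once pattern is std::sync::Once; a sketch, with a print standing in for the real LLVMRustSetLLVMOptions FFI call:

    use std::sync::Once;

    static CONFIGURE: Once = Once::new();

    fn configure_llvm(args: &[&str]) {
        // runs at most once, no matter how many sessions configure LLVM
        CONFIGURE.call_once(|| {
            println!("setting {} LLVM options", args.len());
        });
    }

    fn main() {
        configure_llvm(&["-vectorize-loops"]);
        configure_llvm(&["-vectorize-loops"]); // no-op: already configured
    }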
- GetAddrInfoRequest::run(local_loop(), Some("irc.n0v4.com"), None, + GetAddrInfoRequest::run(loop_, Some("irc.n0v4.com"), None, None); } } diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 5a0db8313fb5c..0c353785982e6 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -127,16 +127,15 @@ impl Drop for AsyncWatcher { mod test_remote { use std::rt::rtio::Callback; use std::rt::thread::Thread; - use std::rt::tube::Tube; - use super::*; + use super::AsyncWatcher; use super::super::local_loop; // Make sure that we can fire watchers in remote threads and that they // actually trigger what they say they will. #[test] fn smoke_test() { - struct MyCallback(Option>); + struct MyCallback(Option>); impl Callback for MyCallback { fn call(&mut self) { // this can get called more than once, but we only want to send @@ -147,16 +146,17 @@ mod test_remote { } } - let mut tube = Tube::new(); - let cb = ~MyCallback(Some(tube.clone())); - let watcher = AsyncWatcher::new(local_loop(), cb as ~Callback); + let (port, chan) = Chan::new(); + let cb = ~MyCallback(Some(chan)); + let watcher = AsyncWatcher::new(&mut local_loop().loop_, + cb as ~Callback); let thread = do Thread::start { let mut watcher = watcher; watcher.fire(); }; - assert_eq!(tube.recv(), 1); + assert_eq!(port.recv(), 1); thread.join(); } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 3a8af71e019ef..82d0fd823a320 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -14,15 +14,14 @@ use std::cast::transmute; use std::cast; use std::libc::{c_int, c_char, c_void, size_t}; use std::libc; -use std::rt::BlockedTask; +use std::rt::task::BlockedTask; use std::io::{FileStat, IoError}; use std::io; -use std::rt::local::Local; use std::rt::rtio; -use std::rt::sched::{Scheduler, SchedHandle}; -use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after}; -use uvio::HomingIO; +use homing::{HomingIO, HomeHandle}; +use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after, wakeup}; +use uvio::UvIoFactory; use uvll; pub struct FsRequest { @@ -34,19 +33,19 @@ pub struct FileWatcher { priv loop_: Loop, priv fd: c_int, priv close: rtio::CloseBehavior, - priv home: SchedHandle, + priv home: HomeHandle, } impl FsRequest { - pub fn open(loop_: &Loop, path: &CString, flags: int, mode: int) + pub fn open(io: &mut UvIoFactory, path: &CString, flags: int, mode: int) -> Result { execute(|req, cb| unsafe { - uvll::uv_fs_open(loop_.handle, + uvll::uv_fs_open(io.uv_loop(), req, path.with_ref(|p| p), flags as c_int, mode as c_int, cb) }).map(|req| - FileWatcher::new(*loop_, req.get_result() as c_int, + FileWatcher::new(io, req.get_result() as c_int, rtio::CloseSynchronously) ) } @@ -320,8 +319,7 @@ fn execute(f: |*uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int) let slot: &mut Option = unsafe { cast::transmute(uvll::get_data_for_req(req)) }; - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(slot.take_unwrap()); + wakeup(slot); } } @@ -331,16 +329,17 @@ fn execute_nop(f: |*uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int) } impl HomingIO for FileWatcher { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl FileWatcher { - pub fn new(loop_: Loop, fd: c_int, close: rtio::CloseBehavior) -> FileWatcher { + pub fn new(io: &mut UvIoFactory, fd: c_int, + close: rtio::CloseBehavior) -> FileWatcher { FileWatcher { - loop_: loop_, + loop_: Loop::wrap(io.uv_loop()), fd: fd, close: close, - 
home: get_handle_to_current_scheduler!() + home: io.make_handle(), } } @@ -448,8 +447,11 @@ mod test { use std::io; use std::str; use std::vec; - use super::*; - use l = super::super::local_loop; + use super::FsRequest; + use super::super::Loop; + use super::super::local_loop; + + fn l() -> &mut Loop { &mut local_loop().loop_ } #[test] fn file_test_full_simple_sync() { @@ -460,7 +462,7 @@ mod test { { // open/create - let result = FsRequest::open(l(), &path_str.to_c_str(), + let result = FsRequest::open(local_loop(), &path_str.to_c_str(), create_flags as int, mode as int); assert!(result.is_ok()); let result = result.unwrap(); @@ -473,7 +475,7 @@ mod test { { // re-open - let result = FsRequest::open(l(), &path_str.to_c_str(), + let result = FsRequest::open(local_loop(), &path_str.to_c_str(), read_flags as int, 0); assert!(result.is_ok()); let result = result.unwrap(); @@ -500,7 +502,7 @@ mod test { let create_flags = (O_RDWR | O_CREAT) as int; let mode = (S_IWUSR | S_IRUSR) as int; - let result = FsRequest::open(l(), path, create_flags, mode); + let result = FsRequest::open(local_loop(), path, create_flags, mode); assert!(result.is_ok()); let file = result.unwrap(); diff --git a/src/librustuv/homing.rs b/src/librustuv/homing.rs new file mode 100644 index 0000000000000..16534b7b38bab --- /dev/null +++ b/src/librustuv/homing.rs @@ -0,0 +1,212 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Homing I/O implementation +//! +//! In libuv, whenever a handle is created on an I/O loop it is illegal to use +//! that handle outside of that I/O loop. We use libuv I/O with our green +//! scheduler, and each green scheduler corresponds to a different I/O loop on a +//! different OS thread. Green tasks are also free to roam among schedulers, +//! which implies that it is possible to create an I/O handle on one event loop +//! and then attempt to use it on another. +//! +//! In order to solve this problem, this module implements the notion of a +//! "homing operation" which will transplant a task from its currently running +//! scheduler back onto the original I/O loop. This is accomplished entirely at +//! the librustuv layer with very little cooperation from the scheduler (which +//! we don't even know exists technically). +//! +//! These homing operations are completed by first realizing that we're on the +//! wrong I/O loop, then descheduling ourselves, sending ourselves to the +//! correct I/O loop, and then waking up the I/O loop in order to process its +//! local queue of tasks which need to run. +//! +//! This enqueueing is done with a concurrent queue from libstd, and the +//! signalling is achieved with an async handle. + +#[allow(dead_code)]; + +use std::cast; +use std::rt::local::Local; +use std::rt::rtio::LocalIo; +use std::rt::task::{Task, BlockedTask}; + +use ForbidUnwind; +use queue::{Queue, QueuePool}; + +/// A handle to a remote libuv event loop. This handle will keep the event loop +/// alive while active in order to ensure that a homing operation can always be +/// completed. +/// +/// Handles are clone-able in order to derive new handles from existing handles +/// (very useful for when accepting a socket from a server). 
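The homing.rs doc comment above describes the whole mechanism; the HomeHandle struct it documents follows next. The essence is that only the owning event-loop thread may touch a libuv handle, so everyone else ships work home through a queue. A minimal sketch of that discipline in today's Rust, with an mpsc channel playing the queue's role and a Vec<u8> standing in for the uv handle:

    use std::sync::mpsc;
    use std::thread;

    type HomeJob = Box<dyn FnOnce(&mut Vec<u8>) + Send>;

    fn main() {
        let (home, jobs) = mpsc::channel::<HomeJob>();

        // the "I/O loop" thread exclusively owns the handle
        let io_loop = thread::spawn(move || {
            let mut handle = Vec::new();
            for job in jobs {
                job(&mut handle); // every touch of the handle happens here
            }
            handle
        });

        // a task on another thread "homes" its write instead of touching
        // the handle directly
        home.send(Box::new(|h: &mut Vec<u8>| h.extend_from_slice(b"hi")))
            .unwrap();
        drop(home); // close the queue so the loop can exit
        assert_eq!(io_loop.join().unwrap(), b"hi".to_vec());
    }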
+pub struct HomeHandle { + priv queue: Queue, + priv id: uint, +} + +impl HomeHandle { + pub fn new(id: uint, pool: &mut QueuePool) -> HomeHandle { + HomeHandle { queue: pool.queue(), id: id } + } + + fn send(&mut self, task: BlockedTask) { + self.queue.push(task); + } +} + +impl Clone for HomeHandle { + fn clone(&self) -> HomeHandle { + HomeHandle { + queue: self.queue.clone(), + id: self.id, + } + } +} + +pub fn local_id() -> uint { + let mut io = match LocalIo::borrow() { + Some(io) => io, None => return 0, + }; + let io = io.get(); + unsafe { + let (_vtable, ptr): (uint, uint) = cast::transmute(io); + return ptr; + } +} + +pub trait HomingIO { + fn home<'r>(&'r mut self) -> &'r mut HomeHandle; + + /// This function will move tasks to run on their home I/O scheduler. Note + /// that this function does *not* pin the task to the I/O scheduler, but + /// rather it simply moves it to running on the I/O scheduler. + fn go_to_IO_home(&mut self) -> uint { + let _f = ForbidUnwind::new("going home"); + + let cur_loop_id = local_id(); + let destination = self.home().id; + + // Try at all costs to avoid the homing operation because it is quite + // expensive. Hence, we only deschedule/send if we're not on the correct + // event loop. If we're already on the home event loop, then we're good + // to go (remember we have no preemption, so we're guaranteed to stay on + // this event loop as long as we avoid the scheduler). + if cur_loop_id != destination { + let cur_task: ~Task = Local::take(); + cur_task.deschedule(1, |task| { + self.home().send(task); + Ok(()) + }); + + // Once we wake up, assert that we're in the right location + assert_eq!(local_id(), destination); + } + + return destination; + } + + /// Fires a single homing missile, returning another missile targeted back + /// at the original home of this task. In other words, this function will + /// move the local task to its I/O scheduler and then return an RAII wrapper + /// which will return the task home. + fn fire_homing_missile(&mut self) -> HomingMissile { + HomingMissile { io_home: self.go_to_IO_home() } + } +} + +/// After a homing operation has been completed, this will return the current +/// task back to its appropriate home (if applicable). The field is used to +/// assert that we are where we think we are. +struct HomingMissile { + priv io_home: uint, +} + +impl HomingMissile { + /// Check at runtime that the task has *not* transplanted itself to a + /// different I/O loop while executing. + pub fn check(&self, msg: &'static str) { + assert!(local_id() == self.io_home, "{}", msg); + } +} + +impl Drop for HomingMissile { + fn drop(&mut self) { + let _f = ForbidUnwind::new("leaving home"); + + // It would truly be a sad day if we had moved off the home I/O + // scheduler while we were doing I/O. + self.check("task moved away from the home scheduler"); + } +} + +#[cfg(test)] +mod test { + use green::sched; + use green::{SchedPool, PoolConfig}; + use std::rt::rtio::RtioUdpSocket; + use std::io::test::next_test_ip4; + use std::task::TaskOpts; + + use net::UdpWatcher; + use super::super::local_loop; + + // On one thread, create a udp socket. Then send that socket to another + // thread and destroy the socket on the remote thread. This should make sure + // that homing kicks in for the socket to go back home to the original + // thread, close itself, and then come back to the last thread. 
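HomingMissile above (and the ForbidSwitch guard later in lib.rs) are the same RAII idea: capture an identity at construction and assert it is unchanged when the guard drops, so any silent migration mid-operation fails loudly. A modern sketch using ThreadId where the patch uses the local event-loop id; the tests that exercise the real mechanism follow:

    use std::thread::{self, ThreadId};

    struct ForbidSwitch {
        msg: &'static str,
        id: ThreadId,
    }

    impl ForbidSwitch {
        fn new(msg: &'static str) -> ForbidSwitch {
            ForbidSwitch { msg, id: thread::current().id() }
        }
    }

    impl Drop for ForbidSwitch {
        fn drop(&mut self) {
            // fail loudly if we woke up somewhere we didn't go to sleep
            assert!(self.id == thread::current().id(),
                    "didn't want a thread switch: {}", self.msg);
        }
    }

    fn main() {
        let _guard = ForbidSwitch::new("during I/O");
        // ...work that must stay on this thread...
    }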
+ #[test] + fn test_homing_closes_correctly() { + let (port, chan) = Chan::new(); + let mut pool = SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: None, + }); + + do pool.spawn(TaskOpts::new()) { + let listener = UdpWatcher::bind(local_loop(), next_test_ip4()); + chan.send(listener.unwrap()); + } + + let task = do pool.task(TaskOpts::new()) { + port.recv(); + }; + pool.spawn_sched().send(sched::TaskFromFriend(task)); + + pool.shutdown(); + } + + #[test] + fn test_homing_read() { + let (port, chan) = Chan::new(); + let mut pool = SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: None, + }); + + do pool.spawn(TaskOpts::new()) { + let addr1 = next_test_ip4(); + let addr2 = next_test_ip4(); + let listener = UdpWatcher::bind(local_loop(), addr2); + chan.send((listener.unwrap(), addr1)); + let mut listener = UdpWatcher::bind(local_loop(), addr1).unwrap(); + listener.sendto([1, 2, 3, 4], addr2); + } + + let task = do pool.task(TaskOpts::new()) { + let (mut watcher, addr) = port.recv(); + let mut buf = [0, ..10]; + assert_eq!(watcher.recvfrom(buf).unwrap(), (4, addr)); + }; + pool.spawn_sched().send(sched::TaskFromFriend(task)); + + pool.shutdown(); + } +} diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 32c7699a30847..80d21404e4bcb 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -97,72 +97,102 @@ impl Drop for IdleWatcher { #[cfg(test)] mod test { - use super::*; - use std::rt::tube::Tube; + use std::cast; + use std::cell::RefCell; + use std::rc::Rc; use std::rt::rtio::{Callback, PausableIdleCallback}; + use std::rt::task::{BlockedTask, Task}; + use std::rt::local::Local; + use super::IdleWatcher; use super::super::local_loop; - struct MyCallback(Tube, int); + type Chan = Rc, uint)>>; + + struct MyCallback(Rc, uint)>>, uint); impl Callback for MyCallback { fn call(&mut self) { - match *self { - MyCallback(ref mut tube, val) => tube.send(val) - } + let task = match *self { + MyCallback(ref rc, n) => { + let mut slot = rc.borrow().borrow_mut(); + match *slot.get() { + (ref mut task, ref mut val) => { + *val = n; + task.take_unwrap() + } + } + } + }; + task.wake().map(|t| t.reawaken(true)); } } + fn mk(v: uint) -> (~IdleWatcher, Chan) { + let rc = Rc::from_send(RefCell::new((None, 0))); + let cb = ~MyCallback(rc.clone(), v); + let cb = cb as ~Callback:; + let cb = unsafe { cast::transmute(cb) }; + (IdleWatcher::new(&mut local_loop().loop_, cb), rc) + } + + fn sleep(chan: &Chan) -> uint { + let task: ~Task = Local::take(); + task.deschedule(1, |task| { + let mut slot = chan.borrow().borrow_mut(); + match *slot.get() { + (ref mut slot, _) => { + assert!(slot.is_none()); + *slot = Some(task); + } + } + Ok(()) + }); + + let slot = chan.borrow().borrow(); + match *slot.get() { (_, n) => n } + } + #[test] fn not_used() { - let cb = ~MyCallback(Tube::new(), 1); - let _idle = IdleWatcher::new(local_loop(), cb as ~Callback); + let (_idle, _chan) = mk(1); } #[test] fn smoke_test() { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + let (mut idle, chan) = mk(1); idle.resume(); - tube.recv(); + assert_eq!(sleep(&chan), 1); } #[test] #[should_fail] fn smoke_fail() { - let tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + let (mut idle, _chan) = mk(1); idle.resume(); fail!(); } #[test] fn fun_combinations_of_methods() { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 
1); - let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + let (mut idle, chan) = mk(1); idle.resume(); - tube.recv(); + assert_eq!(sleep(&chan), 1); idle.pause(); idle.resume(); idle.resume(); - tube.recv(); + assert_eq!(sleep(&chan), 1); idle.pause(); idle.pause(); idle.resume(); - tube.recv(); + assert_eq!(sleep(&chan), 1); } #[test] fn pause_pauses() { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle1 = IdleWatcher::new(local_loop(), cb as ~Callback); - let cb = ~MyCallback(tube.clone(), 2); - let mut idle2 = IdleWatcher::new(local_loop(), cb as ~Callback); + let (mut idle1, chan1) = mk(1); + let (mut idle2, chan2) = mk(2); idle2.resume(); - assert_eq!(tube.recv(), 2); + assert_eq!(sleep(&chan2), 2); idle2.pause(); idle1.resume(); - assert_eq!(tube.recv(), 1); + assert_eq!(sleep(&chan1), 1); } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 2e5b7254769f1..2ef10dd33ac9c 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -41,23 +41,23 @@ via `close` and `delete` methods. #[crate_type = "rlib"]; #[crate_type = "dylib"]; -#[feature(macro_rules, globs)]; +#[feature(macro_rules)]; + +#[cfg(test)] extern mod green; -use std::cast::transmute; use std::cast; +use std::io; +use std::io::IoError; use std::libc::{c_int, malloc}; use std::ptr::null; use std::ptr; -use std::rt::BlockedTask; use std::rt::local::Local; -use std::rt::sched::Scheduler; +use std::rt::task::{BlockedTask, Task}; use std::str::raw::from_c_str; use std::str; use std::task; use std::unstable::finally::Finally; -use std::io::IoError; - pub use self::async::AsyncWatcher; pub use self::file::{FsRequest, FileWatcher}; pub use self::idle::IdleWatcher; @@ -70,6 +70,9 @@ pub use self::tty::TtyWatcher; mod macros; +mod queue; +mod homing; + /// The implementation of `rtio` for libuv pub mod uvio; @@ -144,32 +147,29 @@ pub trait UvHandle { uvll::free_handle(handle); if data == ptr::null() { return } let slot: &mut Option = cast::transmute(data); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(slot.take_unwrap()); + wakeup(slot); } } } } pub struct ForbidSwitch { - msg: &'static str, - sched: uint, + priv msg: &'static str, + priv io: uint, } impl ForbidSwitch { fn new(s: &'static str) -> ForbidSwitch { - let mut sched = Local::borrow(None::); ForbidSwitch { msg: s, - sched: sched.get().sched_id(), + io: homing::local_id(), } } } impl Drop for ForbidSwitch { fn drop(&mut self) { - let mut sched = Local::borrow(None::); - assert!(self.sched == sched.get().sched_id(), + assert!(self.io == homing::local_id(), "didnt want a scheduler switch: {}", self.msg); } @@ -199,14 +199,20 @@ fn wait_until_woken_after(slot: *mut Option, f: ||) { let _f = ForbidUnwind::new("wait_until_woken_after"); unsafe { assert!((*slot).is_none()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|_, task| { - f(); + let task: ~Task = Local::take(); + task.deschedule(1, |task| { *slot = Some(task); - }) + f(); + Ok(()) + }); } } +fn wakeup(slot: &mut Option) { + assert!(slot.is_some()); + slot.take_unwrap().wake().map(|t| t.reawaken(true)); +} + pub struct Request { handle: *uvll::uv_req_t, priv defused: bool, @@ -325,28 +331,26 @@ fn error_smoke_test() { pub fn uv_error_to_io_error(uverr: UvError) -> IoError { unsafe { // Importing error constants - use uvll::*; - use std::io::*; // uv error descriptions are static let c_desc = uvll::uv_strerror(*uverr); let desc = str::raw::c_str_to_static_slice(c_desc); let kind = 
match *uverr { - UNKNOWN => OtherIoError, - OK => OtherIoError, - EOF => EndOfFile, - EACCES => PermissionDenied, - ECONNREFUSED => ConnectionRefused, - ECONNRESET => ConnectionReset, - ENOENT => FileNotFound, - ENOTCONN => NotConnected, - EPIPE => BrokenPipe, - ECONNABORTED => ConnectionAborted, + uvll::UNKNOWN => io::OtherIoError, + uvll::OK => io::OtherIoError, + uvll::EOF => io::EndOfFile, + uvll::EACCES => io::PermissionDenied, + uvll::ECONNREFUSED => io::ConnectionRefused, + uvll::ECONNRESET => io::ConnectionReset, + uvll::ENOTCONN => io::NotConnected, + uvll::ENOENT => io::FileNotFound, + uvll::EPIPE => io::BrokenPipe, + uvll::ECONNABORTED => io::ConnectionAborted, err => { uvdebug!("uverr.code {}", err as int); // XXX: Need to map remaining uv error types - OtherIoError + io::OtherIoError } }; @@ -387,15 +391,17 @@ pub fn slice_to_uv_buf(v: &[u8]) -> Buf { uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } +// This function is full of lies! #[cfg(test)] -fn local_loop() -> &'static mut Loop { +fn local_loop() -> &'static mut uvio::UvIoFactory { unsafe { cast::transmute({ - let mut sched = Local::borrow(None::); + let mut task = Local::borrow(None::); + let mut io = task.get().local_io().unwrap(); let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) = - cast::transmute(sched.get().event_loop.io().unwrap()); + cast::transmute(io.get()); uvio - }.uv_loop()) + }) } } diff --git a/src/librustuv/macros.rs b/src/librustuv/macros.rs index a63dcc6de3105..6c8c16784a12b 100644 --- a/src/librustuv/macros.rs +++ b/src/librustuv/macros.rs @@ -27,18 +27,20 @@ macro_rules! uvdebug ( }) ) -// get a handle for the current scheduler -macro_rules! get_handle_to_current_scheduler( - () => ({ - let mut sched = Local::borrow(None::); - sched.get().make_handle() - }) -) - pub fn dumb_println(args: &fmt::Arguments) { - use std::io::native::file::FileDesc; use std::io; use std::libc; - let mut out = FileDesc::new(libc::STDERR_FILENO, false); - fmt::writeln(&mut out as &mut io::Writer, args); + + struct Stderr; + impl io::Writer for Stderr { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDERR_FILENO, + data.as_ptr() as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut w = Stderr; + fmt::writeln(&mut w as &mut io::Writer, args); } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index ce543eafd2f64..85e9202c1fa60 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -9,24 +9,22 @@ // except according to those terms. 
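The rewritten dumb_println above writes straight to fd 2 with a throwaway Writer instead of pulling in libnative's FileDesc. The same trick in today's Rust: borrow the fd without taking ownership so it isn't closed on drop (ManuallyDrop mirrors the `false` close-flag the old FileDesc::new took). A unix-only sketch:

    use std::fs::File;
    use std::io::Write;
    use std::mem::ManuallyDrop;
    use std::os::unix::io::FromRawFd;

    fn main() {
        // fd 2 is stderr; ManuallyDrop keeps File::drop from closing it
        let mut err = ManuallyDrop::new(unsafe { File::from_raw_fd(2) });
        writeln!(err, "debug output, no locks, no allocation").unwrap();
    }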
use std::cast; -use std::libc; -use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; -use std::ptr; -use std::rt::BlockedTask; use std::io::IoError; use std::io::net::ip::{Ipv4Addr, Ipv6Addr, SocketAddr, IpAddr}; -use std::rt::local::Local; +use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; +use std::libc; +use std::ptr; use std::rt::rtio; -use std::rt::sched::{Scheduler, SchedHandle}; -use std::rt::tube::Tube; +use std::rt::task::BlockedTask; use std::str; use std::vec; +use homing::{HomingIO, HomeHandle}; use stream::StreamWatcher; use super::{Loop, Request, UvError, Buf, status_to_io_result, uv_error_to_io_error, UvHandle, slice_to_uv_buf, - wait_until_woken_after}; -use uvio::HomingIO; + wait_until_woken_after, wakeup}; +use uvio::UvIoFactory; use uvll; use uvll::sockaddr; @@ -88,21 +86,19 @@ pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr { } } -#[cfg(test)] #[test] fn test_ip4_conversion() { - use std::rt; - let ip4 = rt::test::next_test_ip4(); + use std::io::net::ip::{SocketAddr, Ipv4Addr}; + let ip4 = SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 4824 }; socket_addr_as_sockaddr(ip4, |addr| { assert_eq!(ip4, sockaddr_to_socket_addr(addr)); }) } -#[cfg(test)] #[test] fn test_ip6_conversion() { - use std::rt; - let ip6 = rt::test::next_test_ip6(); + use std::io::net::ip::{SocketAddr, Ipv6Addr}; + let ip6 = SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: 4824 }; socket_addr_as_sockaddr(ip6, |addr| { assert_eq!(ip6, sockaddr_to_socket_addr(addr)); }) @@ -145,42 +141,47 @@ fn socket_name(sk: SocketNameKind, handle: *c_void) -> Result, - priv outgoing: Tube>, + priv outgoing: Chan>, + priv incoming: Port>, } pub struct TcpAcceptor { listener: ~TcpListener, - priv incoming: Tube>, } // TCP watchers (clients/streams) impl TcpWatcher { - pub fn new(loop_: &Loop) -> TcpWatcher { + pub fn new(io: &mut UvIoFactory) -> TcpWatcher { + let handle = io.make_handle(); + TcpWatcher::new_home(&io.loop_, handle) + } + + fn new_home(loop_: &Loop, home: HomeHandle) -> TcpWatcher { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { uvll::uv_tcp_init(loop_.handle, handle) }, 0); TcpWatcher { - home: get_handle_to_current_scheduler!(), + home: home, handle: handle, stream: StreamWatcher::new(handle), } } - pub fn connect(loop_: &mut Loop, address: SocketAddr) + pub fn connect(io: &mut UvIoFactory, address: SocketAddr) -> Result { struct Ctx { status: c_int, task: Option } - let tcp = TcpWatcher::new(loop_); + let tcp = TcpWatcher::new(io); let ret = socket_addr_as_sockaddr(address, |addr| { let mut req = Request::new(uvll::UV_CONNECT); let result = unsafe { @@ -213,14 +214,13 @@ impl TcpWatcher { assert!(status != uvll::ECANCELED); let cx: &mut Ctx = unsafe { req.get_data() }; cx.status = status; - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(cx.task.take_unwrap()); + wakeup(&mut cx.task); } } } impl HomingIO for TcpWatcher { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl rtio::RtioSocket for TcpWatcher { @@ -290,17 +290,19 @@ impl Drop for TcpWatcher { // TCP listeners (unbound servers) impl TcpListener { - pub fn bind(loop_: &mut Loop, address: SocketAddr) + pub fn bind(io: &mut UvIoFactory, address: SocketAddr) -> Result<~TcpListener, UvError> { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { - uvll::uv_tcp_init(loop_.handle, handle) + 
uvll::uv_tcp_init(io.uv_loop(), handle) }, 0); + let (port, chan) = Chan::new(); let l = ~TcpListener { - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), handle: handle, closing_task: None, - outgoing: Tube::new(), + outgoing: chan, + incoming: port, }; let res = socket_addr_as_sockaddr(address, |addr| unsafe { uvll::uv_tcp_bind(l.handle, addr) @@ -313,7 +315,7 @@ impl TcpListener { } impl HomingIO for TcpListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle for TcpListener { @@ -330,11 +332,7 @@ impl rtio::RtioSocket for TcpListener { impl rtio::RtioTcpListener for TcpListener { fn listen(mut ~self) -> Result<~rtio::RtioTcpAcceptor, IoError> { // create the acceptor object from ourselves - let incoming = self.outgoing.clone(); - let mut acceptor = ~TcpAcceptor { - listener: self, - incoming: incoming, - }; + let mut acceptor = ~TcpAcceptor { listener: self }; let _m = acceptor.fire_homing_missile(); // XXX: the 128 backlog should be configurable @@ -347,19 +345,18 @@ impl rtio::RtioTcpListener for TcpListener { extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { assert!(status != uvll::ECANCELED); + let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) }; let msg = match status { 0 => { let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); - let client = TcpWatcher::new(&loop_); + let client = TcpWatcher::new_home(&loop_, tcp.home().clone()); assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0); Ok(~client as ~rtio::RtioTcpStream) } n => Err(uv_error_to_io_error(UvError(n))) }; - - let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) }; tcp.outgoing.send(msg); } @@ -373,7 +370,7 @@ impl Drop for TcpListener { // TCP acceptors (bound servers) impl HomingIO for TcpAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { self.listener.home() } } impl rtio::RtioSocket for TcpAcceptor { @@ -385,8 +382,7 @@ impl rtio::RtioSocket for TcpAcceptor { impl rtio::RtioTcpAcceptor for TcpAcceptor { fn accept(&mut self) -> Result<~rtio::RtioTcpStream, IoError> { - let _m = self.fire_homing_missile(); - self.incoming.recv() + self.listener.incoming.recv() } fn accept_simultaneously(&mut self) -> Result<(), IoError> { @@ -410,18 +406,18 @@ impl rtio::RtioTcpAcceptor for TcpAcceptor { pub struct UdpWatcher { handle: *uvll::uv_udp_t, - home: SchedHandle, + home: HomeHandle, } impl UdpWatcher { - pub fn bind(loop_: &Loop, address: SocketAddr) + pub fn bind(io: &mut UvIoFactory, address: SocketAddr) -> Result { let udp = UdpWatcher { handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), }; assert_eq!(unsafe { - uvll::uv_udp_init(loop_.handle, udp.handle) + uvll::uv_udp_init(io.uv_loop(), udp.handle) }, 0); let result = socket_addr_as_sockaddr(address, |addr| unsafe { uvll::uv_udp_bind(udp.handle, addr, 0u32) @@ -438,7 +434,7 @@ impl UvHandle for UdpWatcher { } impl HomingIO for UdpWatcher { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl rtio::RtioSocket for UdpWatcher { @@ -519,9 +515,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { Some(sockaddr_to_socket_addr(addr)) }; cx.result = Some((nread, addr)); - - let sched: ~Scheduler = Local::take(); - 
sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + wakeup(&mut cx.task); } } @@ -556,9 +550,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { assert!(status != uvll::ECANCELED); let cx: &mut Ctx = unsafe { req.get_data() }; cx.result = status; - - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + wakeup(&mut cx.task); } } @@ -640,18 +632,13 @@ impl Drop for UdpWatcher { } } -//////////////////////////////////////////////////////////////////////////////// -/// UV request support -//////////////////////////////////////////////////////////////////////////////// - #[cfg(test)] mod test { - use std::rt::test::*; use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioTcpAcceptor, RtioUdpSocket}; - use std::task; + use std::io::test::{next_test_ip4, next_test_ip6}; - use super::*; + use super::{UdpWatcher, TcpWatcher, TcpListener}; use super::super::local_loop; #[test] @@ -824,7 +811,6 @@ mod test { #[test] fn test_read_read_read() { - use std::rt::rtio::*; let addr = next_test_ip4(); static MAX: uint = 5000; let (port, chan) = Chan::new(); @@ -843,20 +829,18 @@ mod test { } } - do spawn { - port.recv(); - let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); - let mut buf = [0, .. 2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - total_bytes_read += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } + port.recv(); + let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); + let mut buf = [0, .. 2048]; + let mut total_bytes_read = 0; + while total_bytes_read < MAX { + let nread = stream.read(buf).unwrap(); + total_bytes_read += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } - uvdebug!("read {} bytes total", total_bytes_read); } + uvdebug!("read {} bytes total", total_bytes_read); } #[test] @@ -922,65 +906,35 @@ mod test { assert!(total_bytes_sent >= MAX); } - do spawn { - let l = local_loop(); - let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); - let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); - let (port, chan) = (p2, c1); - port.recv(); - chan.send(()); - let mut total_bytes_recv = 0; - let mut buf = [0, .. 2048]; - while total_bytes_recv < MAX { - // ask for more - assert!(client_out.sendto([1], server_in_addr).is_ok()); - // wait for data - let res = client_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(src, server_out_addr); - total_bytes_recv += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } + let l = local_loop(); + let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); + let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); + let (port, chan) = (p2, c1); + port.recv(); + chan.send(()); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 
2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } - // tell the server we're done - assert!(client_out.sendto([0], server_in_addr).is_ok()); } + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); } #[test] fn test_read_and_block() { let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawn { - let listener = TcpListener::bind(local_loop(), addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - let (port2, chan2) = Chan::new(); - chan.send(port2); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for i in range(0u, nread) { - let val = buf[i] as uint; - assert_eq!(val, current % 8); - current += 1; - } - reads += 1; - - chan2.send(()); - } - - // Make sure we had multiple reads - assert!(reads > 1); - } + let (port, chan) = Chan::>::new(); do spawn { let port2 = port.recv(); @@ -992,13 +946,39 @@ mod test { stream.write([0, 1, 2, 3, 4, 5, 6, 7]); port2.recv(); } + + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let (port2, chan2) = Chan::new(); + chan.send(port2); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + + let expected = 32; + let mut current = 0; + let mut reads = 0; + + while current < expected { + let nread = stream.read(buf).unwrap(); + for i in range(0u, nread) { + let val = buf[i] as uint; + assert_eq!(val, current % 8); + current += 1; + } + reads += 1; + + chan2.try_send(()); + } + + // Make sure we had multiple reads + assert!(reads > 1); } #[test] fn test_simple_tcp_server_and_client_on_diff_threads() { let addr = next_test_ip4(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { let listener = TcpListener::bind(local_loop(), addr).unwrap(); let mut acceptor = listener.listen().unwrap(); let mut stream = acceptor.accept().unwrap(); @@ -1010,131 +990,11 @@ mod test { } } - do task::spawn_sched(task::SingleThreaded) { - let mut stream = TcpWatcher::connect(local_loop(), addr); - while stream.is_err() { - stream = TcpWatcher::connect(local_loop(), addr); - } - stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); - } - } - - // On one thread, create a udp socket. Then send that socket to another - // thread and destroy the socket on the remote thread. This should make sure - // that homing kicks in for the socket to go back home to the original - // thread, close itself, and then come back to the last thread. - #[test] - fn test_homing_closes_correctly() { - let (port, chan) = Chan::new(); - - do task::spawn_sched(task::SingleThreaded) { - let listener = UdpWatcher::bind(local_loop(), next_test_ip4()).unwrap(); - chan.send(listener); - } - - do task::spawn_sched(task::SingleThreaded) { - port.recv(); - } - } - - // This is a bit of a crufty old test, but it has its uses. 
- #[test] - fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { - use std::cast; - use std::rt::local::Local; - use std::rt::rtio::{EventLoop, IoFactory}; - use std::rt::sched::Scheduler; - use std::rt::sched::{Shutdown, TaskFromFriend}; - use std::rt::sleeper_list::SleeperList; - use std::rt::task::Task; - use std::rt::thread::Thread; - use std::rt::deque::BufferPool; - use std::task::TaskResult; - use std::unstable::run_in_bare_thread; - use uvio::UvEventLoop; - - do run_in_bare_thread { - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (worker1, stealer1) = pool.deque(); - let (worker2, stealer2) = pool.deque(); - let queues = ~[stealer1, stealer2]; - - let loop1 = ~UvEventLoop::new() as ~EventLoop; - let mut sched1 = ~Scheduler::new(loop1, worker1, queues.clone(), - sleepers.clone()); - let loop2 = ~UvEventLoop::new() as ~EventLoop; - let mut sched2 = ~Scheduler::new(loop2, worker2, queues.clone(), - sleepers.clone()); - - let handle1 = sched1.make_handle(); - let handle2 = sched2.make_handle(); - let tasksFriendHandle = sched2.make_handle(); - - let on_exit: proc(TaskResult) = proc(exit_status) { - let mut handle1 = handle1; - let mut handle2 = handle2; - handle1.send(Shutdown); - handle2.send(Shutdown); - assert!(exit_status.is_ok()); - }; - - unsafe fn local_io() -> &'static mut IoFactory { - let mut sched = Local::borrow(None::); - let io = sched.get().event_loop.io(); - cast::transmute(io.unwrap()) - } - - let test_function: proc() = proc() { - let io = unsafe { local_io() }; - let addr = next_test_ip4(); - let maybe_socket = io.udp_bind(addr); - // this socket is bound to this event loop - assert!(maybe_socket.is_ok()); - - // block self on sched1 - let scheduler: ~Scheduler = Local::take(); - let mut tasksFriendHandle = Some(tasksFriendHandle); - scheduler.deschedule_running_task_and_then(|_, task| { - // unblock task - task.wake().map(|task| { - // send self to sched2 - tasksFriendHandle.take_unwrap() - .send(TaskFromFriend(task)); - }); - // sched1 should now sleep since it has nothing else to do - }) - // sched2 will wake up and get the task as we do nothing else, - // the function ends and the socket goes out of scope sched2 - // will start to run the destructor the destructor will first - // block the task, set it's home as sched1, then enqueue it - // sched2 will dequeue the task, see that it has a home, and - // send it to sched1 sched1 will wake up, exec the close - // function on the correct loop, and then we're done - }; - - let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, - test_function); - main_task.death.on_exit = Some(on_exit); - - let null_task = ~do Task::new_root(&mut sched2.stack_pool, None) { - // nothing - }; - - let main_task = main_task; - let sched1 = sched1; - let thread1 = do Thread::start { - sched1.bootstrap(main_task); - }; - - let sched2 = sched2; - let thread2 = do Thread::start { - sched2.bootstrap(null_task); - }; - - thread1.join(); - thread2.join(); + let mut stream = TcpWatcher::connect(local_loop(), addr); + while stream.is_err() { + stream = TcpWatcher::connect(local_loop(), addr); } + stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); } #[should_fail] #[test] @@ -1176,7 +1036,7 @@ mod test { // force the handle to be created on a different scheduler, failure in // the original task will force a homing operation back to this // scheduler. 
- do task::spawn_sched(task::SingleThreaded) { + do spawn { let w = UdpWatcher::bind(local_loop(), addr).unwrap(); chan.send(w); } @@ -1184,67 +1044,4 @@ mod test { let _w = port.recv(); fail!(); } - - #[should_fail] - #[test] - #[ignore(reason = "linked failure")] - fn linked_failure1() { - let (port, chan) = Chan::new(); - let addr = next_test_ip4(); - - do spawn { - let w = TcpListener::bind(local_loop(), addr).unwrap(); - let mut w = w.listen().unwrap(); - chan.send(()); - w.accept(); - } - - port.recv(); - fail!(); - } - - #[should_fail] - #[test] - #[ignore(reason = "linked failure")] - fn linked_failure2() { - let (port, chan) = Chan::new(); - let addr = next_test_ip4(); - - do spawn { - let w = TcpListener::bind(local_loop(), addr).unwrap(); - let mut w = w.listen().unwrap(); - chan.send(()); - let mut buf = [0]; - w.accept().unwrap().read(buf); - } - - port.recv(); - let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); - - fail!(); - } - - #[should_fail] - #[test] - #[ignore(reason = "linked failure")] - fn linked_failure3() { - let (port, chan) = Chan::new(); - let addr = next_test_ip4(); - - do spawn { - let chan = chan; - let w = TcpListener::bind(local_loop(), addr).unwrap(); - let mut w = w.listen().unwrap(); - chan.send(()); - let mut conn = w.accept().unwrap(); - chan.send(()); - let buf = [0, ..65536]; - conn.write(buf); - } - - port.recv(); - let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); - port.recv(); - fail!(); - } } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 814205cbbf1cc..ff4481e8b97f4 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -9,35 +9,33 @@ // except according to those terms. use std::c_str::CString; -use std::libc; -use std::rt::BlockedTask; use std::io::IoError; -use std::rt::local::Local; +use std::libc; use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor}; -use std::rt::sched::{Scheduler, SchedHandle}; -use std::rt::tube::Tube; +use std::rt::task::BlockedTask; +use homing::{HomingIO, HomeHandle}; use stream::StreamWatcher; use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error, - wait_until_woken_after}; -use uvio::HomingIO; + wait_until_woken_after, wakeup}; +use uvio::UvIoFactory; use uvll; pub struct PipeWatcher { stream: StreamWatcher, - home: SchedHandle, + home: HomeHandle, priv defused: bool, } pub struct PipeListener { - home: SchedHandle, + home: HomeHandle, pipe: *uvll::uv_pipe_t, - priv outgoing: Tube>, + priv outgoing: Chan>, + priv incoming: Port>, } pub struct PipeAcceptor { listener: ~PipeListener, - priv incoming: Tube>, } // PipeWatcher implementation and traits @@ -46,7 +44,12 @@ impl PipeWatcher { // Creates an uninitialized pipe watcher. The underlying uv pipe is ready to // get bound to some other source (this is normally a helper method paired // with another call). 
- pub fn new(loop_: &Loop, ipc: bool) -> PipeWatcher { + pub fn new(io: &mut UvIoFactory, ipc: bool) -> PipeWatcher { + let home = io.make_handle(); + PipeWatcher::new_home(&io.loop_, home, ipc) + } + + pub fn new_home(loop_: &Loop, home: HomeHandle, ipc: bool) -> PipeWatcher { let handle = unsafe { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); assert!(!handle.is_null()); @@ -56,26 +59,28 @@ impl PipeWatcher { }; PipeWatcher { stream: StreamWatcher::new(handle), - home: get_handle_to_current_scheduler!(), + home: home, defused: false, } } - pub fn open(loop_: &Loop, file: libc::c_int) -> Result + pub fn open(io: &mut UvIoFactory, file: libc::c_int) + -> Result { - let pipe = PipeWatcher::new(loop_, false); + let pipe = PipeWatcher::new(io, false); match unsafe { uvll::uv_pipe_open(pipe.handle(), file) } { 0 => Ok(pipe), n => Err(UvError(n)) } } - pub fn connect(loop_: &Loop, name: &CString) -> Result + pub fn connect(io: &mut UvIoFactory, name: &CString) + -> Result { struct Ctx { task: Option, result: libc::c_int, } let mut cx = Ctx { task: None, result: 0 }; let mut req = Request::new(uvll::UV_CONNECT); - let pipe = PipeWatcher::new(loop_, false); + let pipe = PipeWatcher::new(io, false); wait_until_woken_after(&mut cx.task, || { unsafe { @@ -97,8 +102,7 @@ impl PipeWatcher { assert!(status != uvll::ECANCELED); let cx: &mut Ctx = unsafe { req.get_data() }; cx.result = status; - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + wakeup(&mut cx.task); } } @@ -125,7 +129,7 @@ impl RtioPipe for PipeWatcher { } impl HomingIO for PipeWatcher { - fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } + fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home } } impl UvHandle for PipeWatcher { @@ -144,8 +148,10 @@ impl Drop for PipeWatcher { // PipeListener implementation and traits impl PipeListener { - pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> { - let pipe = PipeWatcher::new(loop_, false); + pub fn bind(io: &mut UvIoFactory, name: &CString) + -> Result<~PipeListener, UvError> + { + let pipe = PipeWatcher::new(io, false); match unsafe { uvll::uv_pipe_bind(pipe.handle(), name.with_ref(|p| p)) } { @@ -153,10 +159,12 @@ impl PipeListener { // If successful, unwrap the PipeWatcher because we control how // we close the pipe differently. We can't rely on // StreamWatcher's default close method. 
+ let (port, chan) = Chan::new(); let p = ~PipeListener { - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), pipe: pipe.unwrap(), - outgoing: Tube::new(), + incoming: port, + outgoing: chan, }; Ok(p.install()) } @@ -168,11 +176,7 @@ impl PipeListener { impl RtioUnixListener for PipeListener { fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> { // create the acceptor object from ourselves - let incoming = self.outgoing.clone(); - let mut acceptor = ~PipeAcceptor { - listener: self, - incoming: incoming, - }; + let mut acceptor = ~PipeAcceptor { listener: self }; let _m = acceptor.fire_homing_missile(); // XXX: the 128 backlog should be configurable @@ -184,7 +188,7 @@ impl RtioUnixListener for PipeListener { } impl HomingIO for PipeListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle for PipeListener { @@ -193,19 +197,19 @@ impl UvHandle for PipeListener { extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { assert!(status != uvll::ECANCELED); + + let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) }; let msg = match status { 0 => { let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); - let client = PipeWatcher::new(&loop_, false); + let client = PipeWatcher::new_home(&loop_, pipe.home().clone(), false); assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0); Ok(~client as ~RtioPipe) } n => Err(uv_error_to_io_error(UvError(n))) }; - - let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) }; pipe.outgoing.send(msg); } @@ -220,21 +224,20 @@ impl Drop for PipeListener { impl RtioUnixAcceptor for PipeAcceptor { fn accept(&mut self) -> Result<~RtioPipe, IoError> { - let _m = self.fire_homing_missile(); - self.incoming.recv() + self.listener.incoming.recv() } } impl HomingIO for PipeAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.listener.home } } #[cfg(test)] mod tests { use std::rt::rtio::{RtioUnixListener, RtioUnixAcceptor, RtioPipe}; - use std::rt::test::next_test_unix; + use std::io::test::next_test_unix; - use super::*; + use super::{PipeWatcher, PipeListener}; use super::super::local_loop; #[test] diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 9e359e26f03d6..7b7a16d7084e5 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -8,32 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
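[Annotation] pipe.rs mirrors net.rs here: the runtime-internal `Tube` is replaced by an ordinary `Port`/`Chan` pair owned by the listener, so `listen_cb` sends each accept result and `accept()` becomes a plain `recv()` — which is also why the homing missile disappears from `accept`. A rough modern-Rust analogue of that shape (using `std::sync::mpsc`; the `Conn` type and the callback are placeholders, not the patch's types):

```rust
use std::sync::mpsc::{channel, Receiver, Sender};

type Conn = String; // placeholder for the accepted stream type

// The listener owns both channel ends, as in the patched TcpListener.
struct Listener {
    outgoing: Sender<Result<Conn, i32>>,
    incoming: Receiver<Result<Conn, i32>>,
}

impl Listener {
    fn new() -> Listener {
        let (tx, rx) = channel();
        Listener { outgoing: tx, incoming: rx }
    }

    // Stand-in for the extern "C" listen_cb driven by the event loop:
    // push the accept result; never block inside the callback.
    fn listen_cb(&self, status: i32, conn: Conn) {
        let msg = if status == 0 { Ok(conn) } else { Err(status) };
        self.outgoing.send(msg).unwrap();
    }

    // accept() just drains the queue of already-accepted connections.
    fn accept(&self) -> Result<Conn, i32> {
        self.incoming.recv().unwrap()
    }
}

fn main() {
    let l = Listener::new();
    l.listen_cb(0, "client-1".into());
    assert_eq!(l.accept(), Ok("client-1".into()));
}
```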
+use std::io::IoError; +use std::io::process; use std::libc::c_int; use std::libc; use std::ptr; -use std::rt::BlockedTask; -use std::io::IoError; -use std::io::process::*; -use std::rt::local::Local; use std::rt::rtio::RtioProcess; -use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::task::BlockedTask; use std::vec; -use super::{Loop, UvHandle, UvError, uv_error_to_io_error, - wait_until_woken_after}; -use uvio::HomingIO; -use uvll; +use homing::{HomingIO, HomeHandle}; use pipe::PipeWatcher; +use super::{UvHandle, UvError, uv_error_to_io_error, + wait_until_woken_after, wakeup}; +use uvio::UvIoFactory; +use uvll; pub struct Process { handle: *uvll::uv_process_t, - home: SchedHandle, + home: HomeHandle, /// Task to wake up (may be null) for when the process exits to_wake: Option, /// Collected from the exit_cb - exit_status: Option, + exit_status: Option, } impl Process { @@ -41,7 +40,7 @@ impl Process { /// /// Returns either the corresponding process object or an error which /// occurred. - pub fn spawn(loop_: &Loop, config: ProcessConfig) + pub fn spawn(io_loop: &mut UvIoFactory, config: process::ProcessConfig) -> Result<(~Process, ~[Option]), UvError> { let cwd = config.cwd.map(|s| s.to_c_str()); @@ -52,7 +51,7 @@ impl Process { stdio.set_len(io.len()); for (slot, other) in stdio.iter().zip(io.iter()) { let io = set_stdio(slot as *uvll::uv_stdio_container_t, other, - loop_); + io_loop); ret_io.push(io); } } @@ -78,12 +77,12 @@ impl Process { let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); let process = ~Process { handle: handle, - home: get_handle_to_current_scheduler!(), + home: io_loop.make_handle(), to_wake: None, exit_status: None, }; match unsafe { - uvll::uv_spawn(loop_.handle, handle, &options) + uvll::uv_spawn(io_loop.uv_loop(), handle, &options) } { 0 => Ok(process.install()), err => Err(UvError(err)), @@ -105,33 +104,28 @@ extern fn on_exit(handle: *uvll::uv_process_t, assert!(p.exit_status.is_none()); p.exit_status = Some(match term_signal { - 0 => ExitStatus(exit_status as int), - n => ExitSignal(n as int), + 0 => process::ExitStatus(exit_status as int), + n => process::ExitSignal(n as int), }); - match p.to_wake.take() { - Some(task) => { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task); - } - None => {} - } + if p.to_wake.is_none() { return } + wakeup(&mut p.to_wake); } unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, - io: &StdioContainer, - loop_: &Loop) -> Option { + io: &process::StdioContainer, + io_loop: &mut UvIoFactory) -> Option { match *io { - Ignored => { + process::Ignored => { uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE); None } - InheritFd(fd) => { + process::InheritFd(fd) => { uvll::set_stdio_container_flags(dst, uvll::STDIO_INHERIT_FD); uvll::set_stdio_container_fd(dst, fd); None } - CreatePipe(readable, writable) => { + process::CreatePipe(readable, writable) => { let mut flags = uvll::STDIO_CREATE_PIPE as libc::c_int; if readable { flags |= uvll::STDIO_READABLE_PIPE as libc::c_int; @@ -139,7 +133,7 @@ unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, if writable { flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int; } - let pipe = PipeWatcher::new(loop_, false); + let pipe = PipeWatcher::new(io_loop, false); uvll::set_stdio_container_flags(dst, flags); uvll::set_stdio_container_stream(dst, pipe.handle()); Some(pipe) @@ -186,7 +180,7 @@ fn with_env(env: Option<&[(~str, ~str)]>, f: |**libc::c_char| -> T) -> T { } impl HomingIO for Process { - fn home<'r>(&'r mut self) -> &'r 
mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle for Process { @@ -208,7 +202,7 @@ impl RtioProcess for Process { } } - fn wait(&mut self) -> ProcessExit { + fn wait(&mut self) -> process::ProcessExit { // Make sure (on the home scheduler) that we have an exit status listed let _m = self.fire_homing_missile(); match self.exit_status { diff --git a/src/librustuv/queue.rs b/src/librustuv/queue.rs new file mode 100644 index 0000000000000..32f8d8532a209 --- /dev/null +++ b/src/librustuv/queue.rs @@ -0,0 +1,192 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A concurrent queue used to signal remote event loops +//! +//! This queue implementation is used to send tasks among event loops. This is +//! backed by a multi-producer/single-consumer queue from libstd and uv_async_t +//! handles (to wake up a remote event loop). +//! +//! The uv_async_t is stored next to the event loop, so in order to not keep the +//! event loop alive we use uv_ref and uv_unref in order to control when the +//! async handle is active or not. + +#[allow(dead_code)]; + +use std::cast; +use std::libc::{c_void, c_int}; +use std::rt::task::BlockedTask; +use std::unstable::sync::LittleLock; +use mpsc = std::sync::mpsc_queue; + +use async::AsyncWatcher; +use super::{Loop, UvHandle}; +use uvll; + +enum Message { + Task(BlockedTask), + Increment, + Decrement, +} + +struct State { + handle: *uvll::uv_async_t, + lock: LittleLock, // see comments in async_cb for why this is needed +} + +/// This structure is intended to be stored next to the event loop, and it is +/// used to create new `Queue` structures. +pub struct QueuePool { + priv producer: mpsc::Producer, + priv consumer: mpsc::Consumer, + priv refcnt: uint, +} + +/// This type is used to send messages back to the original event loop. +pub struct Queue { + priv queue: mpsc::Producer, +} + +extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { + assert_eq!(status, 0); + let state: &mut QueuePool = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + let packet = unsafe { state.consumer.packet() }; + + // Remember that there is no guarantee about how many times an async + // callback is called with relation to the number of sends, so process the + // entire queue in a loop. + loop { + match state.consumer.pop() { + mpsc::Data(Task(task)) => { + task.wake().map(|t| t.reawaken(true)); + } + mpsc::Data(Increment) => unsafe { + if state.refcnt == 0 { + uvll::uv_ref((*packet).handle); + } + state.refcnt += 1; + }, + mpsc::Data(Decrement) => unsafe { + state.refcnt -= 1; + if state.refcnt == 0 { + uvll::uv_unref((*packet).handle); + } + }, + mpsc::Empty | mpsc::Inconsistent => break + }; + } + + // If the refcount is now zero after processing the queue, then there is no + // longer a reference on the async handle and it is possible that this event + // loop can exit. What we're not guaranteed, however, is that a producer in + // the middle of dropping itself is yet done with the handle. It could be + // possible that we saw their Decrement message but they have yet to signal + // on the async handle. 
If we were to return immediately, the entire uv loop + // could be destroyed meaning the call to uv_async_send would abort() + // + // In order to fix this, an OS mutex is used to wait for the other end to + // finish before we continue. The drop block on a handle will acquire a + // mutex and then drop it after both the push and send have been completed. + // If we acquire the mutex here, then we are guaranteed that there are no + // longer any senders which are holding on to their handles, so we can + // safely allow the event loop to exit. + if state.refcnt == 0 { + unsafe { + let _l = (*packet).lock.lock(); + } + } +} + +impl QueuePool { + pub fn new(loop_: &mut Loop) -> ~QueuePool { + let handle = UvHandle::alloc(None::, uvll::UV_ASYNC); + let (c, p) = mpsc::queue(State { + handle: handle, + lock: LittleLock::new(), + }); + let q = ~QueuePool { + producer: p, + consumer: c, + refcnt: 0, + }; + + unsafe { + assert_eq!(uvll::uv_async_init(loop_.handle, handle, async_cb), 0); + uvll::uv_unref(handle); + let data: *c_void = *cast::transmute::<&~QueuePool, &*c_void>(&q); + uvll::set_data_for_uv_handle(handle, data); + } + + return q; + } + + pub fn queue(&mut self) -> Queue { + unsafe { + if self.refcnt == 0 { + uvll::uv_ref((*self.producer.packet()).handle); + } + self.refcnt += 1; + } + Queue { queue: self.producer.clone() } + } + + pub fn handle(&self) -> *uvll::uv_async_t { + unsafe { (*self.producer.packet()).handle } + } +} + +impl Queue { + pub fn push(&mut self, task: BlockedTask) { + self.queue.push(Task(task)); + unsafe { + uvll::uv_async_send((*self.queue.packet()).handle); + } + } +} + +impl Clone for Queue { + fn clone(&self) -> Queue { + // Push a request to increment on the queue, but there's no need to + // signal the event loop to process it at this time. We're guaranteed + // that the count is at least one (because we have a queue right here), + // and if the queue is dropped later on it'll see the increment for the + // decrement anyway. + unsafe { + cast::transmute_mut(self).queue.push(Increment); + } + Queue { queue: self.queue.clone() } + } +} + +impl Drop for Queue { + fn drop(&mut self) { + // See the comments in the async_cb function for why there is a lock + // that is acquired only on a drop. + unsafe { + let state = self.queue.packet(); + let _l = (*state).lock.lock(); + self.queue.push(Decrement); + uvll::uv_async_send((*state).handle); + } + } +} + +impl Drop for State { + fn drop(&mut self) { + unsafe { + uvll::uv_close(self.handle, cast::transmute(0)); + // Note that this does *not* free the handle, that is the + // responsibility of the caller because the uv loop must be closed + // before we deallocate this uv handle. 
+ } + } +} diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index f082aef003c60..6772c6d193614 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -10,34 +10,33 @@ use std::libc::c_int; use std::io::signal::Signum; -use std::rt::sched::{SchedHandle, Scheduler}; use std::comm::SharedChan; -use std::rt::local::Local; use std::rt::rtio::RtioSignal; -use super::{Loop, UvError, UvHandle}; +use homing::{HomingIO, HomeHandle}; +use super::{UvError, UvHandle}; use uvll; -use uvio::HomingIO; +use uvio::UvIoFactory; pub struct SignalWatcher { handle: *uvll::uv_signal_t, - home: SchedHandle, + home: HomeHandle, channel: SharedChan, signal: Signum, } impl SignalWatcher { - pub fn new(loop_: &mut Loop, signum: Signum, + pub fn new(io: &mut UvIoFactory, signum: Signum, channel: SharedChan) -> Result<~SignalWatcher, UvError> { let s = ~SignalWatcher { handle: UvHandle::alloc(None::, uvll::UV_SIGNAL), - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), channel: channel, signal: signum, }; assert_eq!(unsafe { - uvll::uv_signal_init(loop_.handle, s.handle) + uvll::uv_signal_init(io.uv_loop(), s.handle) }, 0); match unsafe { @@ -53,11 +52,11 @@ impl SignalWatcher { extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { let s: &mut SignalWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; assert_eq!(signum as int, s.signal as int); - s.channel.send_deferred(s.signal); + s.channel.try_send(s.signal); } impl HomingIO for SignalWatcher { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle for SignalWatcher { @@ -69,15 +68,15 @@ impl RtioSignal for SignalWatcher {} impl Drop for SignalWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); - self.close_async_(); + self.close(); } } #[cfg(test)] mod test { - use super::*; use super::super::local_loop; use std::io::signal; + use super::SignalWatcher; #[test] fn closing_channel_during_drop_doesnt_kill_everything() { diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index 0304b89dd6fde..73173fc677e8f 100644 --- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -11,12 +11,10 @@ use std::cast; use std::libc::{c_int, size_t, ssize_t}; use std::ptr; -use std::rt::BlockedTask; -use std::rt::local::Local; -use std::rt::sched::Scheduler; +use std::rt::task::BlockedTask; use super::{UvError, Buf, slice_to_uv_buf, Request, wait_until_woken_after, - ForbidUnwind}; + ForbidUnwind, wakeup}; use uvll; // This is a helper structure which is intended to get embedded into other @@ -164,8 +162,7 @@ extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: *Buf) { unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } rcx.result = nread; - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(rcx.task.take_unwrap()); + wakeup(&mut rcx.task); } // Unlike reading, the WriteContext is stored in the uv_write_t request. Like @@ -180,6 +177,5 @@ extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { wcx.result = status; req.defuse(); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); + wakeup(&mut wcx.task); } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index ab143d6e8b077..4a0ad44d31147 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -9,19 +9,19 @@ // except according to those terms. 
use std::libc::c_int; -use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::rtio::RtioTimer; -use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::task::{BlockedTask, Task}; use std::util; +use homing::{HomeHandle, HomingIO}; +use super::{UvHandle, ForbidUnwind, ForbidSwitch}; +use uvio::UvIoFactory; use uvll; -use super::{Loop, UvHandle, ForbidUnwind, ForbidSwitch}; -use uvio::HomingIO; pub struct TimerWatcher { handle: *uvll::uv_timer_t, - home: SchedHandle, + home: HomeHandle, action: Option, id: uint, // see comments in timer_cb } @@ -33,15 +33,15 @@ pub enum NextAction { } impl TimerWatcher { - pub fn new(loop_: &mut Loop) -> ~TimerWatcher { + pub fn new(io: &mut UvIoFactory) -> ~TimerWatcher { let handle = UvHandle::alloc(None::, uvll::UV_TIMER); assert_eq!(unsafe { - uvll::uv_timer_init(loop_.handle, handle) + uvll::uv_timer_init(io.uv_loop(), handle) }, 0); let me = ~TimerWatcher { handle: handle, action: None, - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), id: 0, }; return me.install(); @@ -59,7 +59,7 @@ impl TimerWatcher { } impl HomingIO for TimerWatcher { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } + fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle for TimerWatcher { @@ -89,10 +89,11 @@ impl RtioTimer for TimerWatcher { // started, then we need to call stop on the timer. let _f = ForbidUnwind::new("timer"); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|_sched, task| { + let task: ~Task = Local::take(); + task.deschedule(1, |task| { self.action = Some(WakeTask(task)); self.start(msecs, 0); + Ok(()) }); self.stop(); } @@ -137,12 +138,11 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { match timer.action.take_unwrap() { WakeTask(task) => { - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(task); + task.wake().map(|t| t.reawaken(true)); } - SendOnce(chan) => { chan.try_send_deferred(()); } + SendOnce(chan) => { chan.try_send(()); } SendMany(chan, id) => { - chan.try_send_deferred(()); + chan.try_send(()); // Note that the above operation could have performed some form of // scheduling. 
This means that the timer may have decided to insert @@ -169,7 +169,7 @@ impl Drop for TimerWatcher { let _action = { let _m = self.fire_homing_missile(); self.stop(); - self.close_async_(); + self.close(); self.action.take() }; } @@ -177,9 +177,9 @@ impl Drop for TimerWatcher { #[cfg(test)] mod test { - use super::*; use std::rt::rtio::RtioTimer; use super::super::local_loop; + use super::TimerWatcher; #[test] fn oneshot() { @@ -207,9 +207,9 @@ mod test { let port = timer.period(1); port.recv(); port.recv(); - let port = timer.period(1); - port.recv(); - port.recv(); + let port2 = timer.period(1); + port2.recv(); + port2.recv(); } #[test] diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index fcad629657996..0e76ed9feb93d 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -10,24 +10,23 @@ use std::libc; use std::io::IoError; -use std::rt::local::Local; use std::rt::rtio::RtioTTY; -use std::rt::sched::{Scheduler, SchedHandle}; +use homing::{HomingIO, HomeHandle}; use stream::StreamWatcher; -use super::{Loop, UvError, UvHandle, uv_error_to_io_error}; -use uvio::HomingIO; +use super::{UvError, UvHandle, uv_error_to_io_error}; +use uvio::UvIoFactory; use uvll; pub struct TtyWatcher{ tty: *uvll::uv_tty_t, stream: StreamWatcher, - home: SchedHandle, + home: HomeHandle, fd: libc::c_int, } impl TtyWatcher { - pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) + pub fn new(io: &mut UvIoFactory, fd: libc::c_int, readable: bool) -> Result { // libuv may succeed in giving us a handle (via uv_tty_init), but if the @@ -56,14 +55,14 @@ impl TtyWatcher { // with attempting to open it as a tty. let handle = UvHandle::alloc(None::, uvll::UV_TTY); match unsafe { - uvll::uv_tty_init(loop_.handle, handle, fd as libc::c_int, + uvll::uv_tty_init(io.uv_loop(), handle, fd as libc::c_int, readable as libc::c_int) } { 0 => { Ok(TtyWatcher { tty: handle, stream: StreamWatcher::new(handle), - home: get_handle_to_current_scheduler!(), + home: io.make_handle(), fd: fd, }) } @@ -120,7 +119,7 @@ impl UvHandle for TtyWatcher { } impl HomingIO for TtyWatcher { - fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } + fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home } } impl Drop for TtyWatcher { diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index c556b96671ab6..dbf129d0b699c 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -9,121 +9,41 @@ // except according to those terms. 
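[Annotation] One more note on timer.rs before uvio.rs: the `NextAction` dispatch survives, but wakeups now go through `Task::reawaken` and channel sends use `try_send`, so a fired timer no longer fails just because the receiving end went away. A loose sketch of the dispatch (`thread::Thread` standing in for `BlockedTask`; names illustrative):

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

// What a fired timer should do next, mirroring timer.rs's NextAction.
enum NextAction {
    WakeTask(thread::Thread),
    SendOnce(Sender<()>),
    SendMany(Sender<()>),
}

fn timer_cb(action: NextAction) {
    match action {
        NextAction::WakeTask(t) => t.unpark(),
        // `let _ =` mirrors try_send: a hung-up receiver is not an error here.
        NextAction::SendOnce(c) => { let _ = c.send(()); }
        NextAction::SendMany(c) => {
            let _ = c.send(());
            // the real watcher stores the channel back for the next period;
            // this sketch simply drops it
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    timer_cb(NextAction::SendOnce(tx));
    assert_eq!(rx.recv(), Ok(()));
}
```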
use std::c_str::CString; +use std::cast; use std::comm::SharedChan; -use std::libc::c_int; -use std::libc; -use std::path::Path; use std::io::IoError; use std::io::net::ip::SocketAddr; use std::io::process::ProcessConfig; -use std::io; -use std::rt::local::Local; -use std::rt::rtio::*; -use std::rt::sched::{Scheduler, SchedHandle}; -use std::rt::task::Task; -use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, - S_IRUSR, S_IWUSR}; -use std::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, - ReadWrite, FileStat}; use std::io::signal::Signum; +use std::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, + ReadWrite, FileStat}; +use std::io; +use std::libc::c_int; +use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR, + S_IWUSR}; +use std::libc; +use std::path::Path; +use std::rt::rtio; +use std::rt::rtio::IoFactory; use ai = std::io::net::addrinfo; #[cfg(test)] use std::unstable::run_in_bare_thread; -use super::*; -use addrinfo::GetAddrInfoRequest; - -pub trait HomingIO { - - fn home<'r>(&'r mut self) -> &'r mut SchedHandle; - - /// This function will move tasks to run on their home I/O scheduler. Note - /// that this function does *not* pin the task to the I/O scheduler, but - /// rather it simply moves it to running on the I/O scheduler. - fn go_to_IO_home(&mut self) -> uint { - use std::rt::sched::RunOnce; - - let _f = ForbidUnwind::new("going home"); - - let current_sched_id = { - let mut sched = Local::borrow(None::); - sched.get().sched_id() - }; - - // Only need to invoke a context switch if we're not on the right - // scheduler. - if current_sched_id != self.home().sched_id { - let scheduler: ~Scheduler = Local::take(); - scheduler.deschedule_running_task_and_then(|_, task| { - task.wake().map(|task| { - self.home().send(RunOnce(task)); - }); - }) - } - let current_sched_id = { - let mut sched = Local::borrow(None::); - sched.get().sched_id() - }; - assert!(current_sched_id == self.home().sched_id); - - self.home().sched_id - } - - /// Fires a single homing missile, returning another missile targeted back - /// at the original home of this task. In other words, this function will - /// move the local task to its I/O scheduler and then return an RAII wrapper - /// which will return the task home. - fn fire_homing_missile(&mut self) -> HomingMissile { - HomingMissile { io_home: self.go_to_IO_home() } - } +use super::{uv_error_to_io_error, Loop}; - /// Same as `fire_homing_missile`, but returns the local I/O scheduler as - /// well (the one that was homed to). - fn fire_homing_missile_sched(&mut self) -> (HomingMissile, ~Scheduler) { - // First, transplant ourselves to the home I/O scheduler - let missile = self.fire_homing_missile(); - // Next (must happen next), grab the local I/O scheduler - let io_sched: ~Scheduler = Local::take(); - - (missile, io_sched) - } -} - -/// After a homing operation has been completed, this will return the current -/// task back to its appropriate home (if applicable). The field is used to -/// assert that we are where we think we are. 
-struct HomingMissile { - priv io_home: uint, -} - -impl HomingMissile { - pub fn check(&self, msg: &'static str) { - let mut sched = Local::borrow(None::); - let local_id = sched.get().sched_id(); - assert!(local_id == self.io_home, "{}", msg); - } -} - -impl Drop for HomingMissile { - fn drop(&mut self) { - let _f = ForbidUnwind::new("leaving home"); - - // It would truly be a sad day if we had moved off the home I/O - // scheduler while we were doing I/O. - self.check("task moved away from the home scheduler"); - - // If we were a homed task, then we must send ourselves back to the - // original scheduler. Otherwise, we can just return and keep running - if !Task::on_appropriate_sched() { - let scheduler: ~Scheduler = Local::take(); - scheduler.deschedule_running_task_and_then(|_, task| { - task.wake().map(|task| { - Scheduler::run_task(task); - }); - }) - } - } -} +use addrinfo::GetAddrInfoRequest; +use async::AsyncWatcher; +use file::{FsRequest, FileWatcher}; +use queue::QueuePool; +use homing::HomeHandle; +use idle::IdleWatcher; +use net::{TcpWatcher, TcpListener, UdpWatcher}; +use pipe::{PipeWatcher, PipeListener}; +use process::Process; +use signal::SignalWatcher; +use timer::TimerWatcher; +use tty::TtyWatcher; +use uvll; // Obviously an Event Loop is always home. pub struct UvEventLoop { @@ -132,49 +52,65 @@ pub struct UvEventLoop { impl UvEventLoop { pub fn new() -> UvEventLoop { + let mut loop_ = Loop::new(); + let handle_pool = QueuePool::new(&mut loop_); UvEventLoop { - uvio: UvIoFactory(Loop::new()) + uvio: UvIoFactory { + loop_: loop_, + handle_pool: Some(handle_pool), + } } } } impl Drop for UvEventLoop { fn drop(&mut self) { - self.uvio.uv_loop().close(); + // Must first destroy the pool of handles before we destroy the loop + // because otherwise the contained async handle will be destroyed after + // the loop is free'd (use-after-free). We also must free the uv handle + // after the loop has been closed because during the closing of the loop + // the handle is required to be used apparently. 
+ let handle = self.uvio.handle_pool.get_ref().handle(); + self.uvio.handle_pool.take(); + self.uvio.loop_.close(); + unsafe { uvll::free_handle(handle) } } } -impl EventLoop for UvEventLoop { +impl rtio::EventLoop for UvEventLoop { fn run(&mut self) { - self.uvio.uv_loop().run(); + self.uvio.loop_.run(); } fn callback(&mut self, f: proc()) { - IdleWatcher::onetime(self.uvio.uv_loop(), f); + IdleWatcher::onetime(&mut self.uvio.loop_, f); } - fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback { - IdleWatcher::new(self.uvio.uv_loop(), cb) as ~PausableIdleCallback + fn pausable_idle_callback(&mut self, cb: ~rtio::Callback) + -> ~rtio::PausableIdleCallback + { + IdleWatcher::new(&mut self.uvio.loop_, cb) as ~rtio::PausableIdleCallback } - fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { - ~AsyncWatcher::new(self.uvio.uv_loop(), f) as ~RemoteCallback + fn remote_callback(&mut self, f: ~rtio::Callback) -> ~rtio::RemoteCallback { + ~AsyncWatcher::new(&mut self.uvio.loop_, f) as ~rtio::RemoteCallback } - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { - let factory = &mut self.uvio as &mut IoFactory; + fn io<'a>(&'a mut self) -> Option<&'a mut rtio::IoFactory> { + let factory = &mut self.uvio as &mut rtio::IoFactory; Some(factory) } } #[cfg(not(test))] #[lang = "event_loop_factory"] -pub extern "C" fn new_loop() -> ~EventLoop { - ~UvEventLoop::new() as ~EventLoop +pub fn new_loop() -> ~rtio::EventLoop { + ~UvEventLoop::new() as ~rtio::EventLoop } #[test] fn test_callback_run_once() { + use std::rt::rtio::EventLoop; do run_in_bare_thread { let mut event_loop = UvEventLoop::new(); let mut count = 0; @@ -187,11 +123,19 @@ fn test_callback_run_once() { } } -pub struct UvIoFactory(Loop); +pub struct UvIoFactory { + loop_: Loop, + priv handle_pool: Option<~QueuePool>, +} impl UvIoFactory { - pub fn uv_loop<'a>(&'a mut self) -> &'a mut Loop { - match self { &UvIoFactory(ref mut ptr) => ptr } + pub fn uv_loop<'a>(&mut self) -> *uvll::uv_loop_t { self.loop_.handle } + + pub fn make_handle(&mut self) -> HomeHandle { + // It's understood by the homing code that the "local id" is just the + // pointer of the local I/O factory cast to a uint. + let id: uint = unsafe { cast::transmute_copy(&self) }; + HomeHandle::new(id, &mut **self.handle_pool.get_mut_ref()) } } @@ -200,46 +144,45 @@ impl IoFactory for UvIoFactory { // NB: This blocks the task waiting on the connection. 
// It would probably be better to return a future fn tcp_connect(&mut self, addr: SocketAddr) - -> Result<~RtioTcpStream, IoError> + -> Result<~rtio::RtioTcpStream, IoError> { - match TcpWatcher::connect(self.uv_loop(), addr) { - Ok(t) => Ok(~t as ~RtioTcpStream), + match TcpWatcher::connect(self, addr) { + Ok(t) => Ok(~t as ~rtio::RtioTcpStream), Err(e) => Err(uv_error_to_io_error(e)), } } - fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError> { - match TcpListener::bind(self.uv_loop(), addr) { - Ok(t) => Ok(t as ~RtioTcpListener), + fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioTcpListener, IoError> { + match TcpListener::bind(self, addr) { + Ok(t) => Ok(t as ~rtio::RtioTcpListener), Err(e) => Err(uv_error_to_io_error(e)), } } - fn udp_bind(&mut self, addr: SocketAddr) -> Result<~RtioUdpSocket, IoError> { - match UdpWatcher::bind(self.uv_loop(), addr) { - Ok(u) => Ok(~u as ~RtioUdpSocket), + fn udp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioUdpSocket, IoError> { + match UdpWatcher::bind(self, addr) { + Ok(u) => Ok(~u as ~rtio::RtioUdpSocket), Err(e) => Err(uv_error_to_io_error(e)), } } - fn timer_init(&mut self) -> Result<~RtioTimer, IoError> { - Ok(TimerWatcher::new(self.uv_loop()) as ~RtioTimer) + fn timer_init(&mut self) -> Result<~rtio::RtioTimer, IoError> { + Ok(TimerWatcher::new(self) as ~rtio::RtioTimer) } fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, hint: Option) -> Result<~[ai::Info], IoError> { - let r = GetAddrInfoRequest::run(self.uv_loop(), host, servname, hint); + let r = GetAddrInfoRequest::run(&self.loop_, host, servname, hint); r.map_err(uv_error_to_io_error) } fn fs_from_raw_fd(&mut self, fd: c_int, - close: CloseBehavior) -> ~RtioFileStream { - let loop_ = Loop::wrap(self.uv_loop().handle); - ~FileWatcher::new(loop_, fd, close) as ~RtioFileStream + close: rtio::CloseBehavior) -> ~rtio::RtioFileStream { + ~FileWatcher::new(self, fd, close) as ~rtio::RtioFileStream } fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess) - -> Result<~RtioFileStream, IoError> { + -> Result<~rtio::RtioFileStream, IoError> { let flags = match fm { io::Open => 0, io::Append => libc::O_APPEND, @@ -254,117 +197,117 @@ impl IoFactory for UvIoFactory { libc::S_IRUSR | libc::S_IWUSR), }; - match FsRequest::open(self.uv_loop(), path, flags as int, mode as int) { - Ok(fs) => Ok(~fs as ~RtioFileStream), + match FsRequest::open(self, path, flags as int, mode as int) { + Ok(fs) => Ok(~fs as ~rtio::RtioFileStream), Err(e) => Err(uv_error_to_io_error(e)) } } fn fs_unlink(&mut self, path: &CString) -> Result<(), IoError> { - let r = FsRequest::unlink(self.uv_loop(), path); + let r = FsRequest::unlink(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_lstat(&mut self, path: &CString) -> Result { - let r = FsRequest::lstat(self.uv_loop(), path); + let r = FsRequest::lstat(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_stat(&mut self, path: &CString) -> Result { - let r = FsRequest::stat(self.uv_loop(), path); + let r = FsRequest::stat(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_mkdir(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - let r = FsRequest::mkdir(self.uv_loop(), path, perm as c_int); + let r = FsRequest::mkdir(&self.loop_, path, perm as c_int); r.map_err(uv_error_to_io_error) } fn fs_rmdir(&mut self, path: &CString) -> Result<(), IoError> { - let r = FsRequest::rmdir(self.uv_loop(), path); + let r = FsRequest::rmdir(&self.loop_, 
path); r.map_err(uv_error_to_io_error) } fn fs_rename(&mut self, path: &CString, to: &CString) -> Result<(), IoError> { - let r = FsRequest::rename(self.uv_loop(), path, to); + let r = FsRequest::rename(&self.loop_, path, to); r.map_err(uv_error_to_io_error) } fn fs_chmod(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - let r = FsRequest::chmod(self.uv_loop(), path, perm as c_int); + let r = FsRequest::chmod(&self.loop_, path, perm as c_int); r.map_err(uv_error_to_io_error) } fn fs_readdir(&mut self, path: &CString, flags: c_int) -> Result<~[Path], IoError> { - let r = FsRequest::readdir(self.uv_loop(), path, flags); + let r = FsRequest::readdir(&self.loop_, path, flags); r.map_err(uv_error_to_io_error) } fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - let r = FsRequest::link(self.uv_loop(), src, dst); + let r = FsRequest::link(&self.loop_, src, dst); r.map_err(uv_error_to_io_error) } fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - let r = FsRequest::symlink(self.uv_loop(), src, dst); + let r = FsRequest::symlink(&self.loop_, src, dst); r.map_err(uv_error_to_io_error) } fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> Result<(), IoError> { - let r = FsRequest::chown(self.uv_loop(), path, uid, gid); + let r = FsRequest::chown(&self.loop_, path, uid, gid); r.map_err(uv_error_to_io_error) } fn fs_readlink(&mut self, path: &CString) -> Result { - let r = FsRequest::readlink(self.uv_loop(), path); + let r = FsRequest::readlink(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64) -> Result<(), IoError> { - let r = FsRequest::utime(self.uv_loop(), path, atime, mtime); + let r = FsRequest::utime(&self.loop_, path, atime, mtime); r.map_err(uv_error_to_io_error) } fn spawn(&mut self, config: ProcessConfig) - -> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError> + -> Result<(~rtio::RtioProcess, ~[Option<~rtio::RtioPipe>]), IoError> { - match Process::spawn(self.uv_loop(), config) { + match Process::spawn(self, config) { Ok((p, io)) => { - Ok((p as ~RtioProcess, - io.move_iter().map(|i| i.map(|p| ~p as ~RtioPipe)).collect())) + Ok((p as ~rtio::RtioProcess, + io.move_iter().map(|i| i.map(|p| ~p as ~rtio::RtioPipe)).collect())) } Err(e) => Err(uv_error_to_io_error(e)), } } - fn unix_bind(&mut self, path: &CString) -> Result<~RtioUnixListener, IoError> + fn unix_bind(&mut self, path: &CString) -> Result<~rtio::RtioUnixListener, IoError> { - match PipeListener::bind(self.uv_loop(), path) { - Ok(p) => Ok(p as ~RtioUnixListener), + match PipeListener::bind(self, path) { + Ok(p) => Ok(p as ~rtio::RtioUnixListener), Err(e) => Err(uv_error_to_io_error(e)), } } - fn unix_connect(&mut self, path: &CString) -> Result<~RtioPipe, IoError> { - match PipeWatcher::connect(self.uv_loop(), path) { - Ok(p) => Ok(~p as ~RtioPipe), + fn unix_connect(&mut self, path: &CString) -> Result<~rtio::RtioPipe, IoError> { + match PipeWatcher::connect(self, path) { + Ok(p) => Ok(~p as ~rtio::RtioPipe), Err(e) => Err(uv_error_to_io_error(e)), } } fn tty_open(&mut self, fd: c_int, readable: bool) - -> Result<~RtioTTY, IoError> { - match TtyWatcher::new(self.uv_loop(), fd, readable) { - Ok(tty) => Ok(~tty as ~RtioTTY), + -> Result<~rtio::RtioTTY, IoError> { + match TtyWatcher::new(self, fd, readable) { + Ok(tty) => Ok(~tty as ~rtio::RtioTTY), Err(e) => Err(uv_error_to_io_error(e)) } } - fn pipe_open(&mut self, fd: c_int) -> Result<~RtioPipe, IoError> { - match 
PipeWatcher::open(self.uv_loop(), fd) { - Ok(s) => Ok(~s as ~RtioPipe), + fn pipe_open(&mut self, fd: c_int) -> Result<~rtio::RtioPipe, IoError> { + match PipeWatcher::open(self, fd) { + Ok(s) => Ok(~s as ~rtio::RtioPipe), Err(e) => Err(uv_error_to_io_error(e)) } } fn signal(&mut self, signum: Signum, channel: SharedChan<Signum>) - -> Result<~RtioSignal, IoError> { - match SignalWatcher::new(self.uv_loop(), signum, channel) { - Ok(s) => Ok(s as ~RtioSignal), + -> Result<~rtio::RtioSignal, IoError> { + match SignalWatcher::new(self, signum, channel) { + Ok(s) => Ok(s as ~rtio::RtioSignal), Err(e) => Err(uv_error_to_io_error(e)), } } diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index dea90a40fa9fc..ad5fad99f2063 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -37,7 +37,8 @@ use std::libc; #[cfg(test)] use std::libc::uintptr_t; -pub use self::errors::*; +pub use self::errors::{EACCES, ECONNREFUSED, ECONNRESET, EPIPE, ECONNABORTED, + ECANCELED, EBADF, ENOTCONN, ENOENT}; pub static OK: c_int = 0; pub static EOF: c_int = -4095; @@ -576,6 +577,8 @@ extern { // generic uv functions pub fn uv_loop_delete(l: *uv_loop_t); + pub fn uv_ref(t: *uv_handle_t); + pub fn uv_unref(t: *uv_handle_t); pub fn uv_handle_size(ty: uv_handle_type) -> size_t; pub fn uv_req_size(ty: uv_req_type) -> size_t; pub fn uv_run(l: *uv_loop_t, mode: uv_run_mode) -> c_int; diff --git a/src/libstd/any.rs b/src/libstd/any.rs index 8bce687e24537..45a91d01b7a9c 100644 --- a/src/libstd/any.rs +++ b/src/libstd/any.rs @@ -20,10 +20,11 @@ use cast::transmute; use option::{Option, Some, None}; +use result::{Result, Ok, Err}; use to_str::ToStr; +use unstable::intrinsics::TypeId; use unstable::intrinsics; use util::Void; -use unstable::intrinsics::TypeId; /////////////////////////////////////////////////////////////////////////////// // Any trait /////////////////////////////////////////////////////////////////////////////// @@ -118,13 +119,13 @@ impl<'a> AnyMutRefExt<'a> for &'a mut Any { /// Extension methods for an owning `Any` trait object pub trait AnyOwnExt { /// Returns the boxed value if it is of type `T`, or - /// `None` if it isn't. - fn move<T: 'static>(self) -> Option<~T>; + /// `Err(Self)` if it isn't. + fn move<T: 'static>(self) -> Result<~T, Self>; } impl AnyOwnExt for ~Any { #[inline] - fn move<T: 'static>(self) -> Option<~T> { + fn move<T: 'static>(self) -> Result<~T, ~Any> { if self.is::<T>() { unsafe { // Extract the pointer to the boxed value, temporary alias with self @@ -133,10 +134,10 @@ impl AnyOwnExt for ~Any { // Prevent destructor on self being run intrinsics::forget(self); - Some(ptr) + Ok(ptr) } } else { - None + Err(self) } } } @@ -155,9 +156,8 @@ impl<'a> ToStr for &'a Any { #[cfg(test)] mod tests { + use prelude::*; use super::*; - use super::AnyRefExt; - use option::{Some, None}; #[deriving(Eq)] struct Test; @@ -384,13 +384,19 @@ mod tests { let a = ~8u as ~Any; let b = ~Test as ~Any; - assert_eq!(a.move(), Some(~8u)); - assert_eq!(b.move(), Some(~Test)); + match a.move::<uint>() { + Ok(a) => { assert_eq!(a, ~8u); } + Err(..) => fail!() + } + match b.move::<Test>() { + Ok(a) => { assert_eq!(a, ~Test); } + Err(..) => fail!() + } let a = ~8u as ~Any; let b = ~Test as ~Any; - assert_eq!(a.move(), None::<~Test>); - assert_eq!(b.move(), None::<~uint>); + assert!(a.move::<~Test>().is_err()); + assert!(b.move::<~uint>().is_err()); } } diff --git a/src/libstd/comm/imp.rs b/src/libstd/comm/imp.rs deleted file mode 100644 index bd1d6fed901ca..0000000000000 --- a/src/libstd/comm/imp.rs +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2013 The Rust Project Developers.
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! One of the major goals behind this channel implementation is to work -//! seamlessly on and off the runtime. This also means that the code isn't -//! littered with "if is_green() { ... } else { ... }". Right now, the rest of -//! the runtime isn't quite ready for this abstraction to be done very -//! nicely, so the conditional "if green" blocks are all contained in this inner -//! module. -//! -//! The goal of this module is to mirror what the runtime "should be", not the -//! state that it is currently in today. You'll notice that there is no mention -//! of schedulers or is_green inside any of the channel code; it is currently -//! entirely contained in this one module. -//! -//! In the ideal world, nothing in this module exists and it is all implemented -//! elsewhere in the runtime (in the proper location). All of this code is -//! structured in order to easily refactor this to the correct location whenever -//! we have the trait objects in place to serve as the boundary of the -//! abstraction. - -use iter::{range, Iterator}; -use ops::Drop; -use option::{Some, None, Option}; -use rt::local::Local; -use rt::sched::{SchedHandle, Scheduler, TaskFromFriend}; -use rt::thread::Thread; -use rt; -use unstable::mutex::Mutex; -use unstable::sync::UnsafeArc; - -// A task handle is a method of waking up a blocked task. The handle itself -// is completely opaque and only has a wake() method defined on it. This -// method will wake the task regardless of the context of the thread which -// is currently calling wake(). -// -// This abstraction should be able to be created when putting a task to -// sleep. This should basically be a method on whatever the local Task is, -// consuming the local Task. - -pub struct TaskHandle { - priv inner: TaskRepr -} -enum TaskRepr { - Green(rt::BlockedTask, *mut SchedHandle), - Native(NativeWakeupStyle), } -enum NativeWakeupStyle { - ArcWakeup(UnsafeArc<Mutex>), // shared mutex to synchronize on - LocalWakeup(*mut Mutex), // synchronize on the task-local mutex -} - -impl TaskHandle { - // Signal that this handle should be woken up. The `can_resched` - // argument indicates whether the current task could possibly be - // rescheduled or not. This does not have a lot of meaning for the - // native case, but for an M:N case it indicates whether a context - // switch can happen or not. - pub fn wake(self, can_resched: bool) { - match self.inner { - Green(task, handle) => { - // If we have a local scheduler, then use that to run the - // blocked task, otherwise we can use the handle to send the - // task back to its home. - if rt::in_green_task_context() { - if can_resched { - task.wake().map(Scheduler::run_task); - } else { - let mut s: ~Scheduler = Local::take(); - s.enqueue_blocked_task(task); - Local::put(s); - } - } else { - let task = match task.wake() { - Some(task) => task, None => return - }; - // XXX: this is not an easy section of code to refactor. - // If this handle is owned by the Task (which it - // should be), then this would be a use-after-free - // because once the task is pushed onto the message - // queue, the handle is gone.
- // - // Currently the handle is instead owned by the - // Port/Chan pair, which means that because a - // channel is invoking this method the handle will - // continue to stay alive for the entire duration - // of this method. This will require thought when - // moving the handle into the task. - unsafe { (*handle).send(TaskFromFriend(task)) } - } - } - - // Note that there are no use-after-free races in this code. In - // the arc-case, we own the lock, and in the local case, we're - // using a lock so it's guaranteed that they aren't running while - // we hold the lock. - Native(ArcWakeup(lock)) => { - unsafe { - let lock = lock.get(); - (*lock).lock(); - (*lock).signal(); - (*lock).unlock(); - } - } - Native(LocalWakeup(lock)) => { - unsafe { - (*lock).lock(); - (*lock).signal(); - (*lock).unlock(); - } - } - } - } - - // Trashes the handle to this task. This ensures that necessary memory is - // deallocated, and there may be some extra assertions as well. - pub fn trash(self) { - match self.inner { - Green(task, _) => task.assert_already_awake(), - Native(..) => {} - } - } -} - -// This structure is an abstraction of what should be stored in the local -// task itself. This data is currently stored inside of each channel, but -// this should rather be stored in each task (and channels will still -// continue to lazily initialize this data). - -pub struct TaskData { - priv handle: Option<SchedHandle>, - priv lock: Mutex, -} - -impl TaskData { - pub fn new() -> TaskData { - TaskData { - handle: None, - lock: unsafe { Mutex::empty() }, - } - } -} - -impl Drop for TaskData { - fn drop(&mut self) { - unsafe { self.lock.destroy() } - } -} - -// Now this is the really fun part. This is where all the M:N/1:1-agnostic -// along with recv/select-agnostic blocking information goes. A "blocking -// context" is really just a stack-allocated structure (which is probably -// fine to be a stack-trait-object). -// -// This has some particularly strange interfaces, but the reason for all -// this is to support selection/recv/1:1/M:N all in one bundle. - -pub struct BlockingContext<'a> { - priv inner: BlockingRepr<'a> } - -enum BlockingRepr<'a> { - GreenBlock(rt::BlockedTask, &'a mut Scheduler), - NativeBlock(Option<UnsafeArc<Mutex>>), } - -impl<'a> BlockingContext<'a> { - // Creates one blocking context. The data provided should in theory be - // acquired from the local task, but it is instead acquired from the - // channel currently. - // - // This function will call `f` with a blocking context, plus the data - // that it is given. This function will then return whether this task - // should actually go to sleep or not. If `true` is returned, then this - // function does not return until someone calls `wake()` on the task. - // If `false` is returned, then this function immediately returns. - // - // # Safety note - // - // Note that this stack closure may not be run on the same stack as when - // this function was called. This means that the environment of this - // stack closure could be unsafely aliased. This is currently prevented - // through the guarantee that this function will never return before `f` - // finishes executing.
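The native arm of `wake` above pairs with the native arm of `BlockingContext` below: both sides take the same mutex, so a `signal` cannot race past a sleeper that has already committed to `wait`. A condensed sketch of that handshake, using the same unsafe `Mutex` operations as the surrounding code (`native_block` and `native_wake` are illustrative names, not items from this patch):

```rust
// Receiver: decide whether to sleep while holding the lock, so the
// decision and the wait are atomic with respect to the waker.
unsafe fn native_block(lock: &mut Mutex, decision: || -> bool) {
    lock.lock();
    if decision() {   // still nothing to receive: commit to sleeping
        lock.wait();  // woken by native_wake()'s signal below
    }
    lock.unlock();
}

// Sender: this is exactly the lock/signal/unlock sequence that
// TaskHandle::wake performs for both ArcWakeup and LocalWakeup.
unsafe fn native_wake(lock: &mut Mutex) {
    lock.lock();
    lock.signal();
    lock.unlock();
}
```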
- pub fn one(data: &mut TaskData, - f: |BlockingContext, &mut TaskData| -> bool) { - if rt::in_green_task_context() { - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - let ctx = BlockingContext { inner: GreenBlock(task, sched) }; - // no need to do something on success/failure other than - // returning because the `block` function for a BlockingContext - // takes care of reawakening itself if the blocking procedure - // fails. If this function is successful, then we're already - // blocked, and if it fails, the task will already be - // rescheduled. - f(ctx, data); - }); - } else { - unsafe { data.lock.lock(); } - let ctx = BlockingContext { inner: NativeBlock(None) }; - if f(ctx, data) { - unsafe { data.lock.wait(); } - } - unsafe { data.lock.unlock(); } - } - } - - // Creates many blocking contexts. The intended use case for this - // function is selection over a number of ports. This will create `amt` - // blocking contexts, yielding them to `f` in turn. If `f` returns - // false, then this function aborts and returns immediately. If `f` - // repeatedly returns `true` `amt` times, then this function will block. - pub fn many(amt: uint, f: |BlockingContext| -> bool) { - if rt::in_green_task_context() { - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - for handle in task.make_selectable(amt) { - let ctx = BlockingContext { - inner: GreenBlock(handle, sched) - }; - // see comment above in `one` for why no further action is - // necessary here - if !f(ctx) { break } - } - }); - } else { - // In the native case, our decision to block must be shared - // amongst all of the channels. It may be possible to - // stack-allocate this mutex (instead of putting it in an - // UnsafeArc box), but for now in order to prevent - // use-after-free trivially we place this into a box and then - // pass that around. - unsafe { - let mtx = UnsafeArc::new(Mutex::new()); - (*mtx.get()).lock(); - let success = range(0, amt).all(|_| { - f(BlockingContext { - inner: NativeBlock(Some(mtx.clone())) - }) - }); - if success { - (*mtx.get()).wait(); - } - (*mtx.get()).unlock(); - } - } - } - - // This function will consume this BlockingContext, and optionally block - // according to the atomic `decision` function. The semantics of this - // function are: - // - // * `slot` is required to be a `None`-slot (which is owned by the - // channel) - // * The `slot` will be filled in with a blocked version of the current - // task (with `wake`-ability if this function is successful). - // * If the `decision` function returns true, then this function - // immediately returns having relinquished ownership of the task. - // * If the `decision` function returns false, then the `slot` is reset - // to `None` and the task is re-scheduled if necessary (remember that - // the task will not resume executing before the outer `one` or - // `many` function has returned). This function is expected to have a - // release memory fence in order for the modifications of `to_wake` to be - // visible to other tasks. Code which attempts to read `to_wake` should - // have an acquiring memory fence to guarantee that this write is - // visible. - // - // This function will return whether the blocking occurred or not.
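For concreteness, this is how the old `Port::recv` path (deleted in the `comm/mod.rs` hunks further down in this patch) composes `one` and `block`; `decrement` plays the role of the atomic `decision` function described above:

```rust
// Old call site, reproduced from the removed lines of Port::recv:
// park this task on the packet unless decrement() observes that data
// is already queued (in which case block() re-enqueues and bails out).
BlockingContext::one(&mut (*packet).data, |ctx, data| {
    ctx.block(data, &mut (*packet).to_wake, || (*packet).decrement())
});
```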
- pub fn block(self, - data: &mut TaskData, - slot: &mut Option<TaskHandle>, - decision: || -> bool) -> bool { - assert!(slot.is_none()); - match self.inner { - GreenBlock(task, sched) => { - if data.handle.is_none() { - data.handle = Some(sched.make_handle()); - } - let handle = data.handle.get_mut_ref() as *mut SchedHandle; - *slot = Some(TaskHandle { inner: Green(task, handle) }); - - if !decision() { - match slot.take_unwrap().inner { - Green(task, _) => sched.enqueue_blocked_task(task), - Native(..) => unreachable!() - } - false - } else { - true - } - } - NativeBlock(shared) => { - *slot = Some(TaskHandle { - inner: Native(match shared { - Some(arc) => ArcWakeup(arc), - None => LocalWakeup(&mut data.lock as *mut Mutex), - }) - }); - - if !decision() { - *slot = None; - false - } else { - true - } - } - } - } -} - -// Agnostic method of forcing a yield of the current task -pub fn yield_now() { - if rt::in_green_task_context() { - let sched: ~Scheduler = Local::take(); - sched.yield_now(); - } else { - Thread::yield_now(); - } -} - -// Agnostic method of "maybe yielding" in order to provide fairness -pub fn maybe_yield() { - if rt::in_green_task_context() { - let sched: ~Scheduler = Local::take(); - sched.maybe_yield(); - } else { - // the OS decides fairness, nothing for us to do. - } -} diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs index 33d5d48ebdcae..21db234122b2e 100644 --- a/src/libstd/comm/mod.rs +++ b/src/libstd/comm/mod.rs @@ -233,14 +233,16 @@ use iter::Iterator; use kinds::Send; use ops::Drop; use option::{Option, Some, None}; +use result::{Ok, Err}; +use rt::local::Local; +use rt::task::{Task, BlockedTask}; use rt::thread::Thread; -use unstable::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed}; +use sync::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed}; use vec::{ImmutableVector, OwnedVector}; -use spsc = rt::spsc_queue; -use mpsc = rt::mpsc_queue; +use spsc = sync::spsc_queue; +use mpsc = sync::mpsc_queue; -use self::imp::{TaskHandle, TaskData, BlockingContext}; pub use self::select::Select; macro_rules! test ( @@ -248,24 +250,26 @@ macro_rules! test ( mod $name { #[allow(unused_imports)]; - use util; - use super::super::*; + use native; use prelude::*; + use super::*; + use super::super::*; + use task; + use util; fn f() $b $($a)* #[test] fn uv() { f() } - $($a)* #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn native() { - use unstable::run_in_bare_thread; - run_in_bare_thread(f); + $($a)* #[test] fn native() { + use native; + let (p, c) = Chan::new(); + do native::task::spawn { c.send(f()) } + p.recv(); + } } ) ) -mod imp; mod select; /////////////////////////////////////////////////////////////////////////////// @@ -326,9 +330,7 @@ pub struct SharedChan<T> { struct Packet { cnt: AtomicInt, // How many items are on this channel steals: int, // How many times has a port received without blocking? - to_wake: Option<TaskHandle>, // Task to wake up - - data: TaskData, + to_wake: Option<BlockedTask>, // Task to wake up // This lock is used to wake up native threads blocked in select.
The // `lock` field is not used because the thread blocking in select must @@ -343,6 +345,7 @@ struct Packet { selection_id: uint, select_next: *mut Packet, select_prev: *mut Packet, + recv_cnt: int, } /////////////////////////////////////////////////////////////////////////////// @@ -358,13 +361,13 @@ impl Packet { cnt: AtomicInt::new(0), steals: 0, to_wake: None, - data: TaskData::new(), channels: AtomicInt::new(1), selecting: AtomicBool::new(false), selection_id: 0, select_next: 0 as *mut Packet, select_prev: 0 as *mut Packet, + recv_cnt: 0, } } @@ -418,7 +421,10 @@ impl Packet { // This function must have had at least an acquire fence before it to be // properly called. fn wakeup(&mut self, can_resched: bool) { - self.to_wake.take_unwrap().wake(can_resched); + match self.to_wake.take_unwrap().wake() { + Some(task) => task.reawaken(can_resched), + None => {} + } self.selecting.store(false, Relaxed); } @@ -490,7 +496,7 @@ impl Packet { match self.channels.fetch_sub(1, SeqCst) { 1 => { match self.cnt.swap(DISCONNECTED, SeqCst) { - -1 => { self.wakeup(false); } + -1 => { self.wakeup(true); } DISCONNECTED => {} n => { assert!(n >= 0); } } @@ -531,9 +537,6 @@ impl<T: Send> Chan<T> { /// port. /// /// Rust channels are infinitely buffered so this method will never block. - /// This method may trigger a rescheduling, however, in order to wake up a - /// blocked receiver (if one is present). If no scheduling is desired, then - /// `send_deferred` guarantees that there will be no reschedulings. /// /// # Failure /// @@ -555,15 +558,6 @@ impl<T: Send> Chan<T> { } } - /// This function is equivalent in the semantics of `send`, but it - /// guarantees that a rescheduling will never occur when this method is - /// called. - pub fn send_deferred(&self, t: T) { - if !self.try_send_deferred(t) { - fail!("sending on a closed channel"); - } - } - /// Attempts to send a value on this channel, returning whether it was /// successfully sent. /// @@ -579,9 +573,8 @@ impl<T: Send> Chan<T> { /// be tolerated, then this method should be used instead. pub fn try_send(&self, t: T) -> bool { self.try(t, true) } - /// This function is equivalent in the semantics of `try_send`, but it - /// guarantees that a rescheduling will never occur when this method is - /// called. + /// This function will not stick around for very long. The purpose of this + /// function is to guarantee that no rescheduling is performed. pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) } fn try(&self, t: T, can_resched: bool) -> bool { @@ -606,8 +599,9 @@ impl<T: Send> Chan<T> { // the TLS overhead can be a bit much. n => { assert!(n >= 0); - if can_resched && n > 0 && n % RESCHED_FREQ == 0 { - imp::maybe_yield(); + if n > 0 && n % RESCHED_FREQ == 0 { + let task: ~Task = Local::take(); + task.maybe_yield(); } true } @@ -642,25 +636,9 @@ impl<T: Send> SharedChan<T> { } } - /// This function is equivalent in the semantics of `send`, but it - /// guarantees that a rescheduling will never occur when this method is - /// called.
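With `send_deferred` removed, the public surface is just `send`/`try_send`/`recv`. A minimal use of it in this era's dialect, mirroring the `smoke` tests below:

```rust
let (port, chan) = Chan::new();
do spawn {
    chan.send(1); // never blocks: channels are infinitely buffered
}
assert_eq!(port.recv(), 1); // parks the receiving task until a value arrives
```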
- pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) } - - fn try(&self, t: T, can_resched: bool) -> bool { + pub fn try_send(&self, t: T) -> bool { unsafe { // Note that the multiple sender case is a little tricker // semantically than the single sender case. The logic for @@ -697,10 +675,11 @@ impl SharedChan { match (*packet).increment() { DISCONNECTED => {} // oh well, we tried - -1 => { (*packet).wakeup(can_resched); } + -1 => { (*packet).wakeup(true); } n => { - if can_resched && n > 0 && n % RESCHED_FREQ == 0 { - imp::maybe_yield(); + if n > 0 && n % RESCHED_FREQ == 0 { + let task: ~Task = Local::take(); + task.maybe_yield(); } } } @@ -768,6 +747,18 @@ impl Port { // This is a "best effort" situation, so if a queue is inconsistent just // don't worry about it. let this = unsafe { cast::transmute_mut(self) }; + + // See the comment about yielding on sends, but the same applies here. + // If a thread is spinning in try_recv we should try + unsafe { + let packet = this.queue.packet(); + (*packet).recv_cnt += 1; + if (*packet).recv_cnt % RESCHED_FREQ == 0 { + let task: ~Task = Local::take(); + task.maybe_yield(); + } + } + let ret = match this.queue { SPSC(ref mut queue) => queue.pop(), MPSC(ref mut queue) => match queue.pop() { @@ -840,15 +831,22 @@ impl Port { unsafe { this = cast::transmute_mut(self); packet = this.queue.packet(); - BlockingContext::one(&mut (*packet).data, |ctx, data| { - ctx.block(data, &mut (*packet).to_wake, || (*packet).decrement()) + let task: ~Task = Local::take(); + task.deschedule(1, |task| { + assert!((*packet).to_wake.is_none()); + (*packet).to_wake = Some(task); + if (*packet).decrement() { + Ok(()) + } else { + Err((*packet).to_wake.take_unwrap()) + } }); } let data = self.try_recv_inc(false); if data.is_none() && unsafe { (*packet).cnt.load(SeqCst) } != DISCONNECTED { - fail!("bug: woke up too soon"); + fail!("bug: woke up too soon {}", unsafe { (*packet).cnt.load(SeqCst) }); } return data; } @@ -880,10 +878,16 @@ impl Drop for Port { mod test { use prelude::*; - use task; - use rt::thread::Thread; + use native; + use os; use super::*; - use rt::test::*; + + pub fn stress_factor() -> uint { + match os::getenv("RUST_TEST_STRESS") { + Some(val) => from_str::(val).unwrap(), + None => 1, + } + } test!(fn smoke() { let (p, c) = Chan::new(); @@ -910,99 +914,88 @@ mod test { assert_eq!(p.recv(), 1); }) - #[test] - fn smoke_threads() { + test!(fn smoke_threads() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { c.send(1); } assert_eq!(p.recv(), 1); - } + }) - #[test] #[should_fail] - fn smoke_port_gone() { + test!(fn smoke_port_gone() { let (p, c) = Chan::new(); drop(p); c.send(1); - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_shared_port_gone() { + test!(fn smoke_shared_port_gone() { let (p, c) = SharedChan::new(); drop(p); c.send(1); - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_shared_port_gone2() { + test!(fn smoke_shared_port_gone2() { let (p, c) = SharedChan::new(); drop(p); let c2 = c.clone(); drop(c); c2.send(1); - } + } #[should_fail]) - #[test] #[should_fail] - fn port_gone_concurrent() { + test!(fn port_gone_concurrent() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { p.recv(); } loop { c.send(1) } - } + } #[should_fail]) - #[test] #[should_fail] - fn port_gone_concurrent_shared() { + test!(fn port_gone_concurrent_shared() { let (p, c) = SharedChan::new(); let c1 = c.clone(); - do task::spawn_sched(task::SingleThreaded) { + do 
spawn { p.recv(); } loop { c.send(1); c1.send(1); } - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_chan_gone() { + test!(fn smoke_chan_gone() { let (p, c) = Chan::::new(); drop(c); p.recv(); - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_chan_gone_shared() { + test!(fn smoke_chan_gone_shared() { let (p, c) = SharedChan::<()>::new(); let c2 = c.clone(); drop(c); drop(c2); p.recv(); - } + } #[should_fail]) - #[test] #[should_fail] - fn chan_gone_concurrent() { + test!(fn chan_gone_concurrent() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { c.send(1); c.send(1); } loop { p.recv(); } - } + } #[should_fail]) - #[test] - fn stress() { + test!(fn stress() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { for _ in range(0, 10000) { c.send(1); } } for _ in range(0, 10000) { assert_eq!(p.recv(), 1); } - } + }) - #[test] - fn stress_shared() { + test!(fn stress_shared() { static AMT: uint = 10000; static NTHREADS: uint = 8; let (p, c) = SharedChan::::new(); @@ -1018,47 +1011,53 @@ mod test { for _ in range(0, NTHREADS) { let c = c.clone(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { for _ in range(0, AMT) { c.send(1); } } } p1.recv(); - - } + }) #[test] #[ignore(cfg(windows))] // FIXME(#11003) fn send_from_outside_runtime() { let (p, c) = Chan::::new(); let (p1, c1) = Chan::new(); + let (port, chan) = SharedChan::new(); + let chan2 = chan.clone(); do spawn { c1.send(()); for _ in range(0, 40) { assert_eq!(p.recv(), 1); } + chan2.send(()); } p1.recv(); - let t = do Thread::start { + do native::task::spawn { for _ in range(0, 40) { c.send(1); } - }; - t.join(); + chan.send(()); + } + port.recv(); + port.recv(); } #[test] #[ignore(cfg(windows))] // FIXME(#11003) fn recv_from_outside_runtime() { let (p, c) = Chan::::new(); - let t = do Thread::start { + let (dp, dc) = Chan::new(); + do native::task::spawn { for _ in range(0, 40) { assert_eq!(p.recv(), 1); } + dc.send(()); }; for _ in range(0, 40) { c.send(1); } - t.join(); + dp.recv(); } #[test] @@ -1066,173 +1065,132 @@ mod test { fn no_runtime() { let (p1, c1) = Chan::::new(); let (p2, c2) = Chan::::new(); - let t1 = do Thread::start { + let (port, chan) = SharedChan::new(); + let chan2 = chan.clone(); + do native::task::spawn { assert_eq!(p1.recv(), 1); c2.send(2); - }; - let t2 = do Thread::start { + chan2.send(()); + } + do native::task::spawn { c1.send(1); assert_eq!(p2.recv(), 2); - }; - t1.join(); - t2.join(); + chan.send(()); + } + port.recv(); + port.recv(); } - #[test] - fn oneshot_single_thread_close_port_first() { + test!(fn oneshot_single_thread_close_port_first() { // Simple test of closing without sending - do run_in_newsched_task { - let (port, _chan) = Chan::::new(); - { let _p = port; } - } - } + let (port, _chan) = Chan::::new(); + { let _p = port; } + }) - #[test] - fn oneshot_single_thread_close_chan_first() { + test!(fn oneshot_single_thread_close_chan_first() { // Simple test of closing without sending - do run_in_newsched_task { - let (_port, chan) = Chan::::new(); - { let _c = chan; } - } - } + let (_port, chan) = Chan::::new(); + { let _c = chan; } + }) - #[test] #[should_fail] - fn oneshot_single_thread_send_port_close() { + test!(fn oneshot_single_thread_send_port_close() { // Testing that the sender cleans up the payload if receiver is closed let (port, chan) = Chan::<~int>::new(); { let _p = port; } chan.send(~0); - } + } #[should_fail]) - #[test] - fn oneshot_single_thread_recv_chan_close() { + test!(fn 
oneshot_single_thread_recv_chan_close() { // Receiving on a closed chan will fail - do run_in_newsched_task { - let res = do spawntask_try { - let (port, chan) = Chan::<~int>::new(); - { let _c = chan; } - port.recv(); - }; - // What is our res? - assert!(res.is_err()); - } - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - do run_in_newsched_task { + let res = do task::try { let (port, chan) = Chan::<~int>::new(); - chan.send(~10); - assert!(port.recv() == ~10); - } - } + { let _c = chan; } + port.recv(); + }; + // What is our res? + assert!(res.is_err()); + }) - #[test] - fn oneshot_single_thread_try_send_open() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - assert!(chan.try_send(10)); - assert!(port.recv() == 10); - } - } + test!(fn oneshot_single_thread_send_then_recv() { + let (port, chan) = Chan::<~int>::new(); + chan.send(~10); + assert!(port.recv() == ~10); + }) - #[test] - fn oneshot_single_thread_try_send_closed() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _p = port; } - assert!(!chan.try_send(10)); - } - } + test!(fn oneshot_single_thread_try_send_open() { + let (port, chan) = Chan::::new(); + assert!(chan.try_send(10)); + assert!(port.recv() == 10); + }) - #[test] - fn oneshot_single_thread_try_recv_open() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - chan.send(10); - assert!(port.try_recv() == Some(10)); - } - } + test!(fn oneshot_single_thread_try_send_closed() { + let (port, chan) = Chan::::new(); + { let _p = port; } + assert!(!chan.try_send(10)); + }) - #[test] - fn oneshot_single_thread_try_recv_closed() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _c = chan; } - assert!(port.recv_opt() == None); - } - } + test!(fn oneshot_single_thread_try_recv_open() { + let (port, chan) = Chan::::new(); + chan.send(10); + assert!(port.try_recv() == Some(10)); + }) - #[test] - fn oneshot_single_thread_peek_data() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - assert!(port.try_recv().is_none()); - chan.send(10); - assert!(port.try_recv().is_some()); - } - } + test!(fn oneshot_single_thread_try_recv_closed() { + let (port, chan) = Chan::::new(); + { let _c = chan; } + assert!(port.recv_opt() == None); + }) - #[test] - fn oneshot_single_thread_peek_close() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _c = chan; } - assert!(port.try_recv().is_none()); - assert!(port.try_recv().is_none()); - } - } + test!(fn oneshot_single_thread_peek_data() { + let (port, chan) = Chan::::new(); + assert!(port.try_recv().is_none()); + chan.send(10); + assert!(port.try_recv().is_some()); + }) - #[test] - fn oneshot_single_thread_peek_open() { - do run_in_newsched_task { - let (port, _) = Chan::::new(); - assert!(port.try_recv().is_none()); - } - } + test!(fn oneshot_single_thread_peek_close() { + let (port, chan) = Chan::::new(); + { let _c = chan; } + assert!(port.try_recv().is_none()); + assert!(port.try_recv().is_none()); + }) - #[test] - fn oneshot_multi_task_recv_then_send() { - do run_in_newsched_task { - let (port, chan) = Chan::<~int>::new(); - do spawntask { - assert!(port.recv() == ~10); - } + test!(fn oneshot_single_thread_peek_open() { + let (port, _) = Chan::::new(); + assert!(port.try_recv().is_none()); + }) - chan.send(~10); + test!(fn oneshot_multi_task_recv_then_send() { + let (port, chan) = Chan::<~int>::new(); + do spawn { + assert!(port.recv() == ~10); } - } - #[test] - fn oneshot_multi_task_recv_then_close() { - do 
run_in_newsched_task { - let (port, chan) = Chan::<~int>::new(); - do spawntask_later { - let _chan = chan; - } - let res = do spawntask_try { - assert!(port.recv() == ~10); - }; - assert!(res.is_err()); + chan.send(~10); + }) + + test!(fn oneshot_multi_task_recv_then_close() { + let (port, chan) = Chan::<~int>::new(); + do spawn { + let _chan = chan; } - } + let res = do task::try { + assert!(port.recv() == ~10); + }; + assert!(res.is_err()); + }) - #[test] - fn oneshot_multi_thread_close_stress() { + test!(fn oneshot_multi_thread_close_stress() { stress_factor().times(|| { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - let thread = do spawntask_thread { - let _p = port; - }; - let _chan = chan; - thread.join(); + let (port, chan) = Chan::::new(); + do spawn { + let _p = port; } + let _chan = chan; }) - } + }) - #[test] - fn oneshot_multi_thread_send_close_stress() { + test!(fn oneshot_multi_thread_send_close_stress() { stress_factor().times(|| { let (port, chan) = Chan::::new(); do spawn { @@ -1242,10 +1200,9 @@ mod test { chan.send(1); }; }) - } + }) - #[test] - fn oneshot_multi_thread_recv_close_stress() { + test!(fn oneshot_multi_thread_recv_close_stress() { stress_factor().times(|| { let (port, chan) = Chan::::new(); do spawn { @@ -1262,10 +1219,9 @@ mod test { } }; }) - } + }) - #[test] - fn oneshot_multi_thread_send_recv_stress() { + test!(fn oneshot_multi_thread_send_recv_stress() { stress_factor().times(|| { let (port, chan) = Chan::<~int>::new(); do spawn { @@ -1275,10 +1231,9 @@ mod test { assert!(port.recv() == ~10); } }) - } + }) - #[test] - fn stream_send_recv_stress() { + test!(fn stream_send_recv_stress() { stress_factor().times(|| { let (port, chan) = Chan::<~int>::new(); @@ -1288,7 +1243,7 @@ mod test { fn send(chan: Chan<~int>, i: int) { if i == 10 { return } - do spawntask_random { + do spawn { chan.send(~i); send(chan, i + 1); } @@ -1297,44 +1252,37 @@ mod test { fn recv(port: Port<~int>, i: int) { if i == 10 { return } - do spawntask_random { + do spawn { assert!(port.recv() == ~i); recv(port, i + 1); }; } }) - } + }) - #[test] - fn recv_a_lot() { + test!(fn recv_a_lot() { // Regression test that we don't run out of stack in scheduler context - do run_in_newsched_task { - let (port, chan) = Chan::new(); - 10000.times(|| { chan.send(()) }); - 10000.times(|| { port.recv() }); - } - } + let (port, chan) = Chan::new(); + 10000.times(|| { chan.send(()) }); + 10000.times(|| { port.recv() }); + }) - #[test] - fn shared_chan_stress() { - do run_in_mt_newsched_task { - let (port, chan) = SharedChan::new(); - let total = stress_factor() + 100; - total.times(|| { - let chan_clone = chan.clone(); - do spawntask_random { - chan_clone.send(()); - } - }); + test!(fn shared_chan_stress() { + let (port, chan) = SharedChan::new(); + let total = stress_factor() + 100; + total.times(|| { + let chan_clone = chan.clone(); + do spawn { + chan_clone.send(()); + } + }); - total.times(|| { - port.recv(); - }); - } - } + total.times(|| { + port.recv(); + }); + }) - #[test] - fn test_nested_recv_iter() { + test!(fn test_nested_recv_iter() { let (port, chan) = Chan::::new(); let (total_port, total_chan) = Chan::::new(); @@ -1351,10 +1299,9 @@ mod test { chan.send(2); drop(chan); assert_eq!(total_port.recv(), 6); - } + }) - #[test] - fn test_recv_iter_break() { + test!(fn test_recv_iter_break() { let (port, chan) = Chan::::new(); let (count_port, count_chan) = Chan::::new(); @@ -1376,5 +1323,5 @@ mod test { chan.try_send(2); drop(chan); assert_eq!(count_port.recv(), 4); - } + 
}) } diff --git a/src/libstd/comm/select.rs b/src/libstd/comm/select.rs index bbd4cfea78d7a..302c9d9ea469b 100644 --- a/src/libstd/comm/select.rs +++ b/src/libstd/comm/select.rs @@ -50,10 +50,13 @@ use kinds::Send; use ops::Drop; use option::{Some, None, Option}; use ptr::RawPtr; -use super::imp::BlockingContext; -use super::{Packet, Port, imp}; +use result::{Ok, Err}; +use rt::local::Local; +use rt::task::Task; +use super::{Packet, Port}; +use sync::atomics::{Relaxed, SeqCst}; +use task; use uint; -use unstable::atomics::{Relaxed, SeqCst}; macro_rules! select { ( @@ -184,19 +187,22 @@ impl Select { // Acquire a number of blocking contexts, and block on each one // sequentially until one fails. If one fails, then abort // immediately so we can go unblock on all the other ports. - BlockingContext::many(amt, |ctx| { + let task: ~Task = Local::take(); + task.deschedule(amt, |task| { + // Prepare for the block let (i, packet) = iter.next().unwrap(); + assert!((*packet).to_wake.is_none()); + (*packet).to_wake = Some(task); (*packet).selecting.store(true, SeqCst); - if !ctx.block(&mut (*packet).data, - &mut (*packet).to_wake, - || (*packet).decrement()) { + + if (*packet).decrement() { + Ok(()) + } else { (*packet).abort_selection(false); (*packet).selecting.store(false, SeqCst); ready_index = i; ready_id = (*packet).selection_id; - false - } else { - true + Err((*packet).to_wake.take_unwrap()) } }); @@ -225,7 +231,7 @@ impl Select { if (*packet).abort_selection(true) { ready_id = (*packet).selection_id; while (*packet).selecting.load(Relaxed) { - imp::yield_now(); + task::deschedule(); } } } @@ -304,6 +310,7 @@ impl Iterator<*mut Packet> for PacketIterator { } #[cfg(test)] +#[allow(unused_imports)] mod test { use super::super::*; use prelude::*; @@ -359,19 +366,16 @@ mod test { ) }) - #[test] - fn unblocks() { - use std::io::timer; - + test!(fn unblocks() { let (mut p1, c1) = Chan::::new(); let (mut p2, _c2) = Chan::::new(); let (p3, c3) = Chan::::new(); do spawn { - timer::sleep(3); + 20.times(task::deschedule); c1.send(1); p3.recv(); - timer::sleep(3); + 20.times(task::deschedule); } select! ( @@ -383,18 +387,15 @@ mod test { a = p1.recv_opt() => { assert_eq!(a, None); }, _b = p2.recv() => { fail!() } ) - } - - #[test] - fn both_ready() { - use std::io::timer; + }) + test!(fn both_ready() { let (mut p1, c1) = Chan::::new(); let (mut p2, c2) = Chan::::new(); let (p3, c3) = Chan::<()>::new(); do spawn { - timer::sleep(3); + 20.times(task::deschedule); c1.send(1); c2.send(2); p3.recv(); @@ -408,11 +409,12 @@ mod test { a = p1.recv() => { assert_eq!(a, 1); }, a = p2.recv() => { assert_eq!(a, 2); } ) + assert_eq!(p1.try_recv(), None); + assert_eq!(p2.try_recv(), None); c3.send(()); - } + }) - #[test] - fn stress() { + test!(fn stress() { static AMT: int = 10000; let (mut p1, c1) = Chan::::new(); let (mut p2, c2) = Chan::::new(); @@ -436,69 +438,5 @@ mod test { ) c3.send(()); } - } - - #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn stress_native() { - use std::rt::thread::Thread; - use std::unstable::run_in_bare_thread; - static AMT: int = 10000; - - do run_in_bare_thread { - let (mut p1, c1) = Chan::::new(); - let (mut p2, c2) = Chan::::new(); - let (p3, c3) = Chan::<()>::new(); - - let t = do Thread::start { - for i in range(0, AMT) { - if i % 2 == 0 { - c1.send(i); - } else { - c2.send(i); - } - p3.recv(); - } - }; - - for i in range(0, AMT) { - select! 
( - i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); }, - i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); } - ) - c3.send(()); - } - t.join(); - } - } - - #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn native_both_ready() { - use std::rt::thread::Thread; - use std::unstable::run_in_bare_thread; - - do run_in_bare_thread { - let (mut p1, c1) = Chan::::new(); - let (mut p2, c2) = Chan::::new(); - let (p3, c3) = Chan::<()>::new(); - - let t = do Thread::start { - c1.send(1); - c2.send(2); - p3.recv(); - }; - - select! ( - a = p1.recv() => { assert_eq!(a, 1); }, - b = p2.recv() => { assert_eq!(b, 2); } - ) - select! ( - a = p1.recv() => { assert_eq!(a, 1); }, - b = p2.recv() => { assert_eq!(b, 2); } - ) - c3.send(()); - t.join(); - } - } + }) } diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs index a1465ca7b33a0..b4838d534dcd6 100644 --- a/src/libstd/io/fs.rs +++ b/src/libstd/io/fs.rs @@ -54,7 +54,7 @@ use super::{SeekStyle, Read, Write, Open, IoError, Truncate, use rt::rtio::{RtioFileStream, IoFactory, LocalIo}; use io; use option::{Some, None, Option}; -use result::{Ok, Err, Result}; +use result::{Ok, Err}; use path; use path::{Path, GenericPath}; use vec::{OwnedVector, ImmutableVector}; @@ -75,17 +75,6 @@ pub struct File { priv last_nread: int, } -fn io_raise(f: |io: &mut IoFactory| -> Result) -> Option { - let mut io = LocalIo::borrow(); - match f(io.get()) { - Ok(t) => Some(t), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } -} - impl File { /// Open a file at `path` in the mode specified by the `mode` and `access` /// arguments @@ -131,18 +120,15 @@ impl File { pub fn open_mode(path: &Path, mode: FileMode, access: FileAccess) -> Option { - let mut io = LocalIo::borrow(); - match io.get().fs_open(&path.to_c_str(), mode, access) { - Ok(fd) => Some(File { - path: path.clone(), - fd: fd, - last_nread: -1 - }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.fs_open(&path.to_c_str(), mode, access).map(|fd| { + File { + path: path.clone(), + fd: fd, + last_nread: -1 + } + }) + }) } /// Attempts to open a file in read-only mode. This function is equivalent to @@ -242,7 +228,7 @@ impl File { /// directory, the user lacks permissions to remove the file, or if some /// other filesystem-level error occurs. pub fn unlink(path: &Path) { - io_raise(|io| io.fs_unlink(&path.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_unlink(&path.to_c_str())); } /// Given a path, query the file system to get information about a file, @@ -270,7 +256,9 @@ pub fn unlink(path: &Path) { /// requisite permissions to perform a `stat` call on the given path or if /// there is no entry in the filesystem at the provided path. pub fn stat(path: &Path) -> FileStat { - io_raise(|io| io.fs_stat(&path.to_c_str())).unwrap_or_else(dummystat) + LocalIo::maybe_raise(|io| { + io.fs_stat(&path.to_c_str()) + }).unwrap_or_else(dummystat) } fn dummystat() -> FileStat { @@ -306,7 +294,9 @@ fn dummystat() -> FileStat { /// /// See `stat` pub fn lstat(path: &Path) -> FileStat { - io_raise(|io| io.fs_lstat(&path.to_c_str())).unwrap_or_else(dummystat) + LocalIo::maybe_raise(|io| { + io.fs_lstat(&path.to_c_str()) + }).unwrap_or_else(dummystat) } /// Rename a file or directory to a new name. @@ -324,7 +314,7 @@ pub fn lstat(path: &Path) -> FileStat { /// the process lacks permissions to view the contents, or if some other /// intermittent I/O error occurs. 
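`LocalIo::maybe_raise` itself is not defined in these hunks; judging from the `io_raise` helper it replaces (deleted above), it plausibly has this shape: borrow the task-local `IoFactory`, run the closure, raise the `io_error` condition on `Err`, and return `Some` on `Ok`. A hypothetical reconstruction:

```rust
// Sketch only: modeled directly on the removed io_raise helper; the
// real definition lives in rt::rtio and may differ in detail.
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>)
                      -> Option<T> {
    let mut io = LocalIo::borrow();
    match f(io.get()) {
        Ok(t) => Some(t),
        Err(ioerr) => {
            io_error::cond.raise(ioerr);
            None
        }
    }
}
```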
pub fn rename(from: &Path, to: &Path) { - io_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str())); } /// Copies the contents of one file to another. This function will also @@ -395,7 +385,7 @@ pub fn copy(from: &Path, to: &Path) { /// condition. Some possible error situations are not having the permission to /// change the attributes of a file or the file not existing. pub fn chmod(path: &Path, mode: io::FilePermission) { - io_raise(|io| io.fs_chmod(&path.to_c_str(), mode)); + LocalIo::maybe_raise(|io| io.fs_chmod(&path.to_c_str(), mode)); } /// Change the user and group owners of a file at the specified path. @@ -404,7 +394,7 @@ pub fn chmod(path: &Path, mode: io::FilePermission) { /// /// This function will raise on the `io_error` condition on failure. pub fn chown(path: &Path, uid: int, gid: int) { - io_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid)); + LocalIo::maybe_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid)); } /// Creates a new hard link on the filesystem. The `dst` path will be a @@ -415,7 +405,7 @@ pub fn chown(path: &Path, uid: int, gid: int) { /// /// This function will raise on the `io_error` condition on failure. pub fn link(src: &Path, dst: &Path) { - io_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str())); } /// Creates a new symbolic link on the filesystem. The `dst` path will be a @@ -425,7 +415,7 @@ pub fn link(src: &Path, dst: &Path) { /// /// This function will raise on the `io_error` condition on failure. pub fn symlink(src: &Path, dst: &Path) { - io_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str())); } /// Reads a symlink, returning the file that the symlink points to. @@ -436,7 +426,7 @@ pub fn symlink(src: &Path, dst: &Path) { /// conditions include reading a file that does not exist or reading a file /// which is not a symlink. pub fn readlink(path: &Path) -> Option { - io_raise(|io| io.fs_readlink(&path.to_c_str())) + LocalIo::maybe_raise(|io| io.fs_readlink(&path.to_c_str())) } /// Create a new, empty directory at the provided path @@ -456,7 +446,7 @@ pub fn readlink(path: &Path) -> Option { /// to make a new directory at the provided path, or if the directory already /// exists. pub fn mkdir(path: &Path, mode: FilePermission) { - io_raise(|io| io.fs_mkdir(&path.to_c_str(), mode)); + LocalIo::maybe_raise(|io| io.fs_mkdir(&path.to_c_str(), mode)); } /// Remove an existing, empty directory @@ -475,7 +465,7 @@ pub fn mkdir(path: &Path, mode: FilePermission) { /// to remove the directory at the provided path, or if the directory isn't /// empty. pub fn rmdir(path: &Path) { - io_raise(|io| io.fs_rmdir(&path.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_rmdir(&path.to_c_str())); } /// Retrieve a vector containing all entries within a provided directory @@ -502,7 +492,9 @@ pub fn rmdir(path: &Path) { /// the process lacks permissions to view the contents or if the `path` points /// at a non-directory file pub fn readdir(path: &Path) -> ~[Path] { - io_raise(|io| io.fs_readdir(&path.to_c_str(), 0)).unwrap_or_else(|| ~[]) + LocalIo::maybe_raise(|io| { + io.fs_readdir(&path.to_c_str(), 0) + }).unwrap_or_else(|| ~[]) } /// Returns an iterator which will recursively walk the directory structure @@ -583,7 +575,7 @@ pub fn rmdir_recursive(path: &Path) { /// happens. 
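All of these `fs` wrappers report failure through the `io_error` condition rather than through a return value, so callers that want to observe the error trap the condition. The pattern, as used by the tests later in this patch:

```rust
let mut called = false;
io_error::cond.trap(|_| {
    called = true; // runs in place of unwinding the failing task
}).inside(|| {
    unlink(&Path::new("file_that_does_not_exist.txt"));
});
assert!(called);
```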
// FIXME(#10301) these arguments should not be u64 pub fn change_file_times(path: &Path, atime: u64, mtime: u64) { - io_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime)); + LocalIo::maybe_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime)); } impl Reader for File { @@ -722,7 +714,7 @@ mod test { } } - fn tmpdir() -> TempDir { + pub fn tmpdir() -> TempDir { use os; use rand; let ret = os::tmpdir().join(format!("rust-{}", rand::random::())); @@ -730,32 +722,7 @@ mod test { TempDir(ret) } - macro_rules! test ( - { fn $name:ident() $b:block } => ( - mod $name { - use prelude::*; - use io::{SeekSet, SeekCur, SeekEnd, io_error, Read, Open, - ReadWrite}; - use io; - use str; - use io::fs::{File, rmdir, mkdir, readdir, rmdir_recursive, - mkdir_recursive, copy, unlink, stat, symlink, link, - readlink, chmod, lstat, change_file_times}; - use io::fs::test::tmpdir; - use util; - - fn f() $b - - #[test] fn uv() { f() } - #[test] fn native() { - use rt::test::run_in_newsched_task; - run_in_newsched_task(f); - } - } - ) - ) - - test!(fn file_test_io_smoke_test() { + iotest!(fn file_test_io_smoke_test() { let message = "it's alright. have a good time"; let tmpdir = tmpdir(); let filename = &tmpdir.join("file_rt_io_file_test.txt"); @@ -775,7 +742,7 @@ mod test { unlink(filename); }) - test!(fn invalid_path_raises() { + iotest!(fn invalid_path_raises() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_that_does_not_exist.txt"); let mut called = false; @@ -788,7 +755,7 @@ mod test { assert!(called); }) - test!(fn file_test_iounlinking_invalid_path_should_raise_condition() { + iotest!(fn file_test_iounlinking_invalid_path_should_raise_condition() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt"); let mut called = false; @@ -798,7 +765,7 @@ mod test { assert!(called); }) - test!(fn file_test_io_non_positional_read() { + iotest!(fn file_test_io_non_positional_read() { let message: &str = "ten-four"; let mut read_mem = [0, .. 8]; let tmpdir = tmpdir(); @@ -823,7 +790,7 @@ mod test { assert_eq!(read_str, message); }) - test!(fn file_test_io_seek_and_tell_smoke_test() { + iotest!(fn file_test_io_seek_and_tell_smoke_test() { let message = "ten-four"; let mut read_mem = [0, .. 
4]; let set_cursor = 4 as u64; @@ -849,7 +816,7 @@ mod test { assert_eq!(tell_pos_post_read, message.len() as u64); }) - test!(fn file_test_io_seek_and_write() { + iotest!(fn file_test_io_seek_and_write() { let initial_msg = "food-is-yummy"; let overwrite_msg = "-the-bar!!"; let final_msg = "foo-the-bar!!"; @@ -872,7 +839,7 @@ mod test { assert!(read_str == final_msg.to_owned()); }) - test!(fn file_test_io_seek_shakedown() { + iotest!(fn file_test_io_seek_shakedown() { use std::str; // 01234567890123 let initial_msg = "qwer-asdf-zxcv"; let chunk_one: &str = "qwer"; @@ -903,7 +870,7 @@ mod test { unlink(filename); }) - test!(fn file_test_stat_is_correct_on_is_file() { + iotest!(fn file_test_stat_is_correct_on_is_file() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_stat_correct_on_is_file.txt"); { @@ -916,7 +883,7 @@ mod test { unlink(filename); }) - test!(fn file_test_stat_is_correct_on_is_dir() { + iotest!(fn file_test_stat_is_correct_on_is_dir() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_stat_correct_on_is_dir"); mkdir(filename, io::UserRWX); @@ -925,7 +892,7 @@ mod test { rmdir(filename); }) - test!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() { + iotest!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() { let tmpdir = tmpdir(); let dir = &tmpdir.join("fileinfo_false_on_dir"); mkdir(dir, io::UserRWX); @@ -933,7 +900,7 @@ mod test { rmdir(dir); }) - test!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() { + iotest!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() { let tmpdir = tmpdir(); let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt"); File::create(file).write(bytes!("foo")); @@ -942,7 +909,7 @@ mod test { assert!(!file.exists()); }) - test!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() { + iotest!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() { let tmpdir = tmpdir(); let dir = &tmpdir.join("before_and_after_dir"); assert!(!dir.exists()); @@ -953,7 +920,7 @@ mod test { assert!(!dir.exists()); }) - test!(fn file_test_directoryinfo_readdir() { + iotest!(fn file_test_directoryinfo_readdir() { use std::str; let tmpdir = tmpdir(); let dir = &tmpdir.join("di_readdir"); @@ -984,11 +951,11 @@ mod test { rmdir(dir); }) - test!(fn recursive_mkdir_slash() { + iotest!(fn recursive_mkdir_slash() { mkdir_recursive(&Path::new("/"), io::UserRWX); }) - test!(fn unicode_path_is_dir() { + iotest!(fn unicode_path_is_dir() { assert!(Path::new(".").is_dir()); assert!(!Path::new("test/stdtest/fs.rs").is_dir()); @@ -1006,7 +973,7 @@ mod test { assert!(filepath.exists()); }) - test!(fn unicode_path_exists() { + iotest!(fn unicode_path_exists() { assert!(Path::new(".").exists()); assert!(!Path::new("test/nonexistent-bogus-path").exists()); @@ -1018,7 +985,7 @@ mod test { assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists()); }) - test!(fn copy_file_does_not_exist() { + iotest!(fn copy_file_does_not_exist() { let from = Path::new("test/nonexistent-bogus-path"); let to = Path::new("test/other-bogus-path"); match io::result(|| copy(&from, &to)) { @@ -1030,7 +997,7 @@ mod test { } }) - test!(fn copy_file_ok() { + iotest!(fn copy_file_ok() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1043,7 +1010,7 @@ mod test { assert_eq!(input.stat().perm, out.stat().perm); }) - test!(fn copy_file_dst_dir() { + iotest!(fn copy_file_dst_dir() { let tmpdir = tmpdir(); let out = tmpdir.join("out"); @@ -1053,7 +1020,7 @@ 
mod test { } }) - test!(fn copy_file_dst_exists() { + iotest!(fn copy_file_dst_exists() { let tmpdir = tmpdir(); let input = tmpdir.join("in"); let output = tmpdir.join("out"); @@ -1066,7 +1033,7 @@ mod test { (bytes!("foo")).to_owned()); }) - test!(fn copy_file_src_dir() { + iotest!(fn copy_file_src_dir() { let tmpdir = tmpdir(); let out = tmpdir.join("out"); @@ -1076,7 +1043,7 @@ mod test { assert!(!out.exists()); }) - test!(fn copy_file_preserves_perm_bits() { + iotest!(fn copy_file_preserves_perm_bits() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1091,7 +1058,7 @@ mod test { }) #[cfg(not(windows))] // FIXME(#10264) operation not permitted? - test!(fn symlinks_work() { + iotest!(fn symlinks_work() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1106,14 +1073,14 @@ mod test { }) #[cfg(not(windows))] // apparently windows doesn't like symlinks - test!(fn symlink_noexist() { + iotest!(fn symlink_noexist() { let tmpdir = tmpdir(); // symlinks can point to things that don't exist symlink(&tmpdir.join("foo"), &tmpdir.join("bar")); assert!(readlink(&tmpdir.join("bar")).unwrap() == tmpdir.join("foo")); }) - test!(fn readlink_not_symlink() { + iotest!(fn readlink_not_symlink() { let tmpdir = tmpdir(); match io::result(|| readlink(&*tmpdir)) { Ok(..) => fail!("wanted a failure"), @@ -1121,7 +1088,7 @@ mod test { } }) - test!(fn links_work() { + iotest!(fn links_work() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1147,7 +1114,7 @@ mod test { } }) - test!(fn chmod_works() { + iotest!(fn chmod_works() { let tmpdir = tmpdir(); let file = tmpdir.join("in.txt"); @@ -1164,7 +1131,7 @@ mod test { chmod(&file, io::UserFile); }) - test!(fn sync_doesnt_kill_anything() { + iotest!(fn sync_doesnt_kill_anything() { let tmpdir = tmpdir(); let path = tmpdir.join("in.txt"); @@ -1177,7 +1144,7 @@ mod test { drop(file); }) - test!(fn truncate_works() { + iotest!(fn truncate_works() { let tmpdir = tmpdir(); let path = tmpdir.join("in.txt"); @@ -1208,7 +1175,7 @@ mod test { drop(file); }) - test!(fn open_flavors() { + iotest!(fn open_flavors() { let tmpdir = tmpdir(); match io::result(|| File::open_mode(&tmpdir.join("a"), io::Open, diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index bd0b9e08b7c6d..2d52986294d1c 100644 --- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -164,9 +164,6 @@ requests are implemented by descheduling the running task and performing an asynchronous request; the task is only resumed once the asynchronous request completes. -For blocking (but possibly more efficient) implementations, look -in the `io::native` module. - # Error Handling I/O is an area where nearly every operation can result in unexpected @@ -316,6 +313,9 @@ pub use self::net::udp::UdpStream; pub use self::pipe::PipeStream; pub use self::process::Process; +/// Various utility functions useful for writing I/O tests +pub mod test; + /// Synchronous, non-blocking filesystem operations. 
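The new `io::test` module backs the `iotest!` macro adopted in the tests above: each test body runs twice, once on the green (libuv) runtime and once on a native task. A condensed sketch of the native half, following the expansion of the `test!` macro shown earlier in this patch (`run_native` is an illustrative name):

```rust
// Spawn the body on a 1:1 native task and join through a channel,
// exactly as the expanded native test arm does.
fn run_native(f: fn()) {
    let (p, c) = Chan::new();
    do native::task::spawn {
        c.send(f())
    }
    p.recv()
}
```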
pub mod fs; @@ -349,8 +349,6 @@ pub mod timer; /// Buffered I/O wrappers pub mod buffered; -pub mod native; - /// Signal handling pub mod signal; diff --git a/src/libstd/io/net/addrinfo.rs b/src/libstd/io/net/addrinfo.rs index 7df4fdd226676..6d968de209ca8 100644 --- a/src/libstd/io/net/addrinfo.rs +++ b/src/libstd/io/net/addrinfo.rs @@ -18,8 +18,6 @@ getaddrinfo() */ use option::{Option, Some, None}; -use result::{Ok, Err}; -use io::{io_error}; use io::net::ip::{SocketAddr, IpAddr}; use rt::rtio::{IoFactory, LocalIo}; use vec::ImmutableVector; @@ -97,14 +95,7 @@ pub fn get_host_addresses(host: &str) -> Option<~[IpAddr]> { /// consumption just yet. fn lookup(hostname: Option<&str>, servname: Option<&str>, hint: Option) -> Option<~[Info]> { - let mut io = LocalIo::borrow(); - match io.get().get_host_addresses(hostname, servname, hint) { - Ok(i) => Some(i), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| io.get_host_addresses(hostname, servname, hint)) } #[cfg(test)] diff --git a/src/libstd/io/net/tcp.rs b/src/libstd/io/net/tcp.rs index a6230ede7e348..e7787692dd2f5 100644 --- a/src/libstd/io/net/tcp.rs +++ b/src/libstd/io/net/tcp.rs @@ -26,17 +26,9 @@ impl TcpStream { } pub fn connect(addr: SocketAddr) -> Option { - let result = { - let mut io = LocalIo::borrow(); - io.get().tcp_connect(addr) - }; - match result { - Ok(s) => Some(TcpStream::new(s)), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.tcp_connect(addr).map(TcpStream::new) + }) } pub fn peer_name(&mut self) -> Option { @@ -94,14 +86,9 @@ pub struct TcpListener { impl TcpListener { pub fn bind(addr: SocketAddr) -> Option { - let mut io = LocalIo::borrow(); - match io.get().tcp_bind(addr) { - Ok(l) => Some(TcpListener { obj: l }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.tcp_bind(addr).map(|l| TcpListener { obj: l }) + }) } pub fn socket_name(&mut self) -> Option { @@ -147,513 +134,473 @@ impl Acceptor for TcpAcceptor { #[cfg(test)] mod test { use super::*; - use rt::test::*; use io::net::ip::{Ipv4Addr, SocketAddr}; use io::*; + use io::test::{next_test_ip4, next_test_ip6}; use prelude::*; #[test] #[ignore] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let listener = TcpListener::bind(addr); - assert!(listener.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let listener = TcpListener::bind(addr); + assert!(listener.is_none()); + }); + assert!(called); } #[test] fn connect_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - let expected_error = if cfg!(unix) { - ConnectionRefused - } else { - // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error. 
- OtherIoError - }; - assert_eq!(e.kind, expected_error); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let stream = TcpStream::connect(addr); - assert!(stream.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + let expected_error = if cfg!(unix) { + ConnectionRefused + } else { + // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error. + OtherIoError + }; + assert_eq!(e.kind, expected_error); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let stream = TcpStream::connect(addr); + assert!(stream.is_none()); + }); + assert!(called); } #[test] fn smoke_test_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - } + let addr = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let mut stream = TcpStream::connect(addr); stream.write([99]); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); } #[test] fn smoke_test_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - } + let addr = next_test_ip6(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let mut stream = TcpStream::connect(addr); stream.write([99]); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); } #[test] fn read_eof_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - } + let addr = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); } #[test] fn read_eof_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - } + let addr = next_test_ip6(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); } #[test] fn read_eof_twice_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = 
TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - io_error::cond.trap(|e| { - if cfg!(windows) { - assert_eq!(e.kind, NotConnected); - } else { - fail!(); - } - }).inside(|| { - let nread = stream.read(buf); - assert!(nread.is_none()); - }) - } + let addr = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + io_error::cond.trap(|e| { + if cfg!(windows) { + assert_eq!(e.kind, NotConnected); + } else { + fail!(); + } + }).inside(|| { + let nread = stream.read(buf); + assert!(nread.is_none()); + }) } #[test] fn read_eof_twice_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - io_error::cond.trap(|e| { - if cfg!(windows) { - assert_eq!(e.kind, NotConnected); - } else { - fail!(); - } - }).inside(|| { - let nread = stream.read(buf); - assert!(nread.is_none()); - }) - } + let addr = next_test_ip6(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + io_error::cond.trap(|e| { + if cfg!(windows) { + assert_eq!(e.kind, NotConnected); + } else { + fail!(); + } + }).inside(|| { + let nread = stream.read(buf); + assert!(nread.is_none()); + }) } #[test] fn write_close_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let buf = [0]; - loop { - let mut stop = false; - io_error::cond.trap(|e| { - // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED - // on windows - assert!(e.kind == ConnectionReset || - e.kind == BrokenPipe || - e.kind == ConnectionAborted, - "unknown error: {:?}", e); - stop = true; - }).inside(|| { - stream.write(buf); - }); - if stop { break } - } - } + let addr = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let buf = [0]; + loop { + let mut stop = false; + io_error::cond.trap(|e| { + // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED + // on windows + assert!(e.kind == ConnectionReset || + e.kind == BrokenPipe || + e.kind == ConnectionAborted, + "unknown error: {:?}", e); + stop = true; + }).inside(|| { + stream.write(buf); + }); + if stop { break } + } } #[test] fn write_close_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let buf = [0]; - loop { - let mut stop = false; - io_error::cond.trap(|e| { - // NB: ECONNRESET on 
linux, EPIPE on mac, ECONNABORTED - // on windows - assert!(e.kind == ConnectionReset || - e.kind == BrokenPipe || - e.kind == ConnectionAborted, - "unknown error: {:?}", e); - stop = true; - }).inside(|| { - stream.write(buf); - }); - if stop { break } - } - } + let addr = next_test_ip6(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let buf = [0]; + loop { + let mut stop = false; + io_error::cond.trap(|e| { + // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED + // on windows + assert!(e.kind == ConnectionReset || + e.kind == BrokenPipe || + e.kind == ConnectionAborted, + "unknown error: {:?}", e); + stop = true; + }).inside(|| { + stream.write(buf); + }); + if stop { break } + } } #[test] fn multiple_connect_serial_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let max = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for ref mut stream in acceptor.incoming().take(max) { - let mut buf = [0]; - stream.read(buf); - assert_eq!(buf[0], 99); - } - } + let addr = next_test_ip4(); + let max = 10; + let (port, chan) = Chan::new(); + do spawn { port.recv(); max.times(|| { let mut stream = TcpStream::connect(addr); stream.write([99]); }); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for ref mut stream in acceptor.incoming().take(max) { + let mut buf = [0]; + stream.read(buf); + assert_eq!(buf[0], 99); + } } #[test] fn multiple_connect_serial_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let max = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for ref mut stream in acceptor.incoming().take(max) { - let mut buf = [0]; - stream.read(buf); - assert_eq!(buf[0], 99); - } - } + let addr = next_test_ip6(); + let max = 10; + let (port, chan) = Chan::new(); + do spawn { port.recv(); max.times(|| { let mut stream = TcpStream::connect(addr); stream.write([99]); }); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for ref mut stream in acceptor.incoming().take(max) { + let mut buf = [0]; + stream.read(buf); + assert_eq!(buf[0], 99); + } } #[test] fn multiple_connect_interleaved_greedy_schedule_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - // Start another task to handle the connection - do spawntask { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == i as u8); - debug!("read"); - } + let addr = next_test_ip4(); + static MAX: int = 10; + let (port, chan) = Chan::new(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { + // Start another task to handle the connection + do spawn { + let mut stream = stream; + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == i as u8); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask { - debug!("connecting"); - let mut stream = 
TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([i as u8]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([i as u8]); } } } #[test] fn multiple_connect_interleaved_greedy_schedule_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - // Start another task to handle the connection - do spawntask { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == i as u8); - debug!("read"); - } + let addr = next_test_ip6(); + static MAX: int = 10; + let (port, chan) = Chan::<()>::new(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { + // Start another task to handle the connection + do spawn { + let mut stream = stream; + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == i as u8); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([i as u8]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([i as u8]); } } } #[test] fn multiple_connect_interleaved_lazy_schedule_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for stream in acceptor.incoming().take(MAX as uint) { - // Start another task to handle the connection - do spawntask_later { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - debug!("read"); - } + let addr = next_test_ip4(); + static MAX: int = 10; + let (port, chan) = Chan::new(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for stream in acceptor.incoming().take(MAX as uint) { + // Start another task to handle the connection + do spawn { + let mut stream = stream; + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask_later { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([99]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([99]); } } } #[test] fn 
multiple_connect_interleaved_lazy_schedule_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for stream in acceptor.incoming().take(MAX as uint) { - // Start another task to handle the connection - do spawntask_later { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - debug!("read"); - } + let addr = next_test_ip6(); + static MAX: int = 10; + let (port, chan) = Chan::new(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for stream in acceptor.incoming().take(MAX as uint) { + // Start another task to handle the connection + do spawn { + let mut stream = stream; + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask_later { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([99]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([99]); } } } #[cfg(test)] fn socket_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - do spawntask { - let mut listener = TcpListener::bind(addr).unwrap(); - - // Make sure socket_name gives - // us the socket we binded to. - let so_name = listener.socket_name(); - assert!(so_name.is_some()); - assert_eq!(addr, so_name.unwrap()); + let mut listener = TcpListener::bind(addr).unwrap(); - } - } + // Make sure socket_name gives + // us the socket we bound to. + let so_name = listener.socket_name(); + assert!(so_name.is_some()); + assert_eq!(addr, so_name.unwrap()); } #[cfg(test)] fn peer_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); + let (port, chan) = Chan::new(); - acceptor.accept(); - } + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + acceptor.accept(); + } - port.recv(); - let stream = TcpStream::connect(addr); + port.recv(); + let stream = TcpStream::connect(addr); - assert!(stream.is_some()); - let mut stream = stream.unwrap(); + assert!(stream.is_some()); + let mut stream = stream.unwrap(); - // Make sure peer_name gives us the - // address/port of the peer we've - // connected to.
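One idiom repeats through all of the rewritten tests above and below: the accepting side binds before it signals, so the connecting task can never race ahead of the listen. Distilled from smoke_test_ip4 (same pre-1.0 APIs as the patch):

    let addr = next_test_ip4();
    let (port, chan) = Chan::new();
    do spawn {
        port.recv();                       // block until the listener exists
        let mut stream = TcpStream::connect(addr);
        stream.write([99]);
    }
    let mut acceptor = TcpListener::bind(addr).listen();
    chan.send(());                         // bound and listening: connect now
    let mut stream = acceptor.accept();
    let mut buf = [0];
    stream.read(buf);
    assert!(buf[0] == 99);

Without the send/recv handshake the connect could fire before the bind, and the green and native runs would fail intermittently.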
+ let peer_name = stream.peer_name(); + assert!(peer_name.is_some()); + assert_eq!(addr, peer_name.unwrap()); } #[test] @@ -668,5 +615,4 @@ mod test { //peer_name(next_test_ip6()); socket_name(next_test_ip6()); } - } diff --git a/src/libstd/io/net/udp.rs b/src/libstd/io/net/udp.rs index 1e56f964bea52..7cb8f741cf3c1 100644 --- a/src/libstd/io/net/udp.rs +++ b/src/libstd/io/net/udp.rs @@ -21,14 +21,9 @@ pub struct UdpSocket { impl UdpSocket { pub fn bind(addr: SocketAddr) -> Option { - let mut io = LocalIo::borrow(); - match io.get().udp_bind(addr) { - Ok(s) => Some(UdpSocket { obj: s }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.udp_bind(addr).map(|s| UdpSocket { obj: s }) + }) } pub fn recvfrom(&mut self, buf: &mut [u8]) -> Option<(uint, SocketAddr)> { @@ -104,52 +99,32 @@ impl Writer for UdpStream { #[cfg(test)] mod test { use super::*; - use rt::test::*; use io::net::ip::{Ipv4Addr, SocketAddr}; use io::*; + use io::test::*; use prelude::*; #[test] #[ignore] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let socket = UdpSocket::bind(addr); - assert!(socket.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let socket = UdpSocket::bind(addr); + assert!(socket.is_none()); + }); + assert!(called); } #[test] fn socket_smoke_test_ip4() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip4(); - let client_ip = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(ref mut server) => { - chan.send(()); - let mut buf = [0]; - match server.recvfrom(buf) { - Some((nread, src)) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - assert_eq!(src, client_ip); - } - None => fail!() - } - } - None => fail!() - } - } + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { match UdpSocket::bind(client_ip) { Some(ref mut client) => { port.recv(); @@ -158,33 +133,31 @@ mod test { None => fail!() } } - } - #[test] - fn socket_smoke_test_ip6() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip6(); - let client_ip = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(ref mut server) => { - chan.send(()); - let mut buf = [0]; - match server.recvfrom(buf) { - Some((nread, src)) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - assert_eq!(src, client_ip); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(ref mut server) => { + chan.send(()); + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); } None => fail!() } } + None => fail!() + } + } + #[test] + fn socket_smoke_test_ip6() { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + let (port, chan) = Chan::<()>::new(); + + do spawn { match UdpSocket::bind(client_ip) { Some(ref mut client) => { port.recv(); @@ -193,34 +166,31 @@ mod test { None => fail!() } } - } - #[test] - fn stream_smoke_test_ip4() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip4(); - let client_ip 
= next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(server) => { - let server = ~server; - let mut stream = server.connect(client_ip); - chan.send(()); - let mut buf = [0]; - match stream.read(buf) { - Some(nread) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(ref mut server) => { + chan.send(()); + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); } None => fail!() } } + None => fail!() + } + } + + #[test] + fn stream_smoke_test_ip4() { + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + let (port, chan) = Chan::new(); + do spawn { match UdpSocket::bind(client_ip) { Some(client) => { let client = ~client; @@ -231,34 +201,32 @@ mod test { None => fail!() } } - } - #[test] - fn stream_smoke_test_ip6() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip6(); - let client_ip = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(server) => { - let server = ~server; - let mut stream = server.connect(client_ip); - chan.send(()); - let mut buf = [0]; - match stream.read(buf) { - Some(nread) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + chan.send(()); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); } + None => fail!() } } + None => fail!() + } + } + + #[test] + fn stream_smoke_test_ip6() { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + let (port, chan) = Chan::new(); + do spawn { match UdpSocket::bind(client_ip) { Some(client) => { let client = ~client; @@ -269,25 +237,36 @@ mod test { None => fail!() } } + + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + chan.send(()); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + } + None => fail!() + } + } + None => fail!() + } } - #[cfg(test)] fn socket_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - do spawntask { - let server = UdpSocket::bind(addr); - - assert!(server.is_some()); - let mut server = server.unwrap(); + let server = UdpSocket::bind(addr); - // Make sure socket_name gives - // us the socket we binded to. + assert!(server.is_some()); + let mut server = server.unwrap(); - } - } + // Make sure socket_name gives + // us the socket we bound to.
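The stream_smoke tests above exercise the connected-UDP wrapper: boxing a bound socket and fixing a peer address yields a UdpStream, so the generic Reader/Writer traits work over datagrams. The read half, distilled from the code above (treat the details as illustrative, not normative):

    let server = UdpSocket::bind(server_ip).unwrap();
    let server = ~server;                        // connect consumes a boxed socket
    let mut stream = server.connect(client_ip);  // UdpStream: peer now fixed
    let mut buf = [0];
    match stream.read(buf) {                     // Reader over single datagrams
        Some(nread) => assert_eq!(nread, 1),
        None => fail!()
    }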
+ let so_name = server.socket_name(); + assert!(so_name.is_some()); + assert_eq!(addr, so_name.unwrap()); } #[test] diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs index 2766aa9ad2738..01b409d43163a 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -59,14 +59,9 @@ impl UnixStream { /// stream.write([1, 2, 3]); /// pub fn connect(path: &P) -> Option { - let mut io = LocalIo::borrow(); - match io.get().unix_connect(&path.to_c_str()) { - Ok(s) => Some(UnixStream::new(s)), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.unix_connect(&path.to_c_str()).map(UnixStream::new) + }) } } @@ -107,14 +102,9 @@ impl UnixListener { /// } /// pub fn bind(path: &P) -> Option { - let mut io = LocalIo::borrow(); - match io.get().unix_bind(&path.to_c_str()) { - Ok(s) => Some(UnixListener{ obj: s }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s }) + }) } } @@ -150,55 +140,49 @@ impl Acceptor for UnixAcceptor { mod tests { use prelude::*; use super::*; - use rt::test::*; use io::*; + use io::test::*; fn smalltest(server: proc(UnixStream), client: proc(UnixStream)) { - do run_in_mt_newsched_task { - let path1 = next_test_unix(); - let path2 = path1.clone(); - let (client, server) = (client, server); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = UnixListener::bind(&path1).listen(); - chan.send(()); - server(acceptor.accept().unwrap()); - } + let path1 = next_test_unix(); + let path2 = path1.clone(); + let (port, chan) = Chan::new(); + do spawn { port.recv(); client(UnixStream::connect(&path2).unwrap()); } + + let mut acceptor = UnixListener::bind(&path1).listen(); + chan.send(()); + server(acceptor.accept().unwrap()); } #[test] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let listener = UnixListener::bind(&("path/to/nowhere")); - assert!(listener.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let listener = UnixListener::bind(&("path/to/nowhere")); + assert!(listener.is_none()); + }); + assert!(called); } #[test] fn connect_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert_eq!(e.kind, FileNotFound); - called = true; - }).inside(|| { - let stream = UnixStream::connect(&("path/to/nowhere")); - assert!(stream.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert_eq!(e.kind, + if cfg!(windows) {OtherIoError} else {FileNotFound}); + called = true; + }).inside(|| { + let stream = UnixStream::connect(&("path/to/nowhere")); + assert!(stream.is_none()); + }); + assert!(called); } #[test] @@ -244,37 +228,33 @@ mod tests { #[test] fn accept_lots() { - do run_in_mt_newsched_task { - let times = 10; - let path1 = next_test_unix(); - let path2 = path1.clone(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = UnixListener::bind(&path1).listen(); - chan.send(()); - times.times(|| { - let mut client = acceptor.accept(); - let mut buf = [0]; - client.read(buf); - assert_eq!(buf[0], 100); - }) - } + let times = 10; + let path1 = next_test_unix(); + let path2 = path1.clone(); + let (port, chan) = Chan::new(); + do spawn { 
port.recv(); times.times(|| { let mut stream = UnixStream::connect(&path2); stream.write([100]); }) } + + let mut acceptor = UnixListener::bind(&path1).listen(); + chan.send(()); + times.times(|| { + let mut client = acceptor.accept(); + let mut buf = [0]; + client.read(buf); + assert_eq!(buf[0], 100); + }) } #[test] fn path_exists() { - do run_in_mt_newsched_task { - let path = next_test_unix(); - let _acceptor = UnixListener::bind(&path).listen(); - assert!(path.exists()); - } + let path = next_test_unix(); + let _acceptor = UnixListener::bind(&path).listen(); + assert!(path.exists()); } } diff --git a/src/libstd/io/option.rs b/src/libstd/io/option.rs index 61c5411f3602f..a661d6ab7eb79 100644 --- a/src/libstd/io/option.rs +++ b/src/libstd/io/option.rs @@ -106,53 +106,46 @@ impl> Acceptor for Option { mod test { use option::*; use super::super::mem::*; - use rt::test::*; use super::super::{PreviousIoError, io_error}; #[test] fn test_option_writer() { - do run_in_mt_newsched_task { - let mut writer: Option = Some(MemWriter::new()); - writer.write([0, 1, 2]); - writer.flush(); - assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]); - } + let mut writer: Option = Some(MemWriter::new()); + writer.write([0, 1, 2]); + writer.flush(); + assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]); } #[test] fn test_option_writer_error() { - do run_in_mt_newsched_task { - let mut writer: Option = None; - - let mut called = false; - io_error::cond.trap(|err| { - assert_eq!(err.kind, PreviousIoError); - called = true; - }).inside(|| { - writer.write([0, 0, 0]); - }); - assert!(called); - - let mut called = false; - io_error::cond.trap(|err| { - assert_eq!(err.kind, PreviousIoError); - called = true; - }).inside(|| { - writer.flush(); - }); - assert!(called); - } + let mut writer: Option = None; + + let mut called = false; + io_error::cond.trap(|err| { + assert_eq!(err.kind, PreviousIoError); + called = true; + }).inside(|| { + writer.write([0, 0, 0]); + }); + assert!(called); + + let mut called = false; + io_error::cond.trap(|err| { + assert_eq!(err.kind, PreviousIoError); + called = true; + }).inside(|| { + writer.flush(); + }); + assert!(called); } #[test] fn test_option_reader() { - do run_in_mt_newsched_task { - let mut reader: Option = Some(MemReader::new(~[0, 1, 2, 3])); - let mut buf = [0, 0]; - reader.read(buf); - assert_eq!(buf, [0, 1]); - assert!(!reader.eof()); - } + let mut reader: Option = Some(MemReader::new(~[0, 1, 2, 3])); + let mut buf = [0, 0]; + reader.read(buf); + assert_eq!(buf, [0, 1]); + assert!(!reader.eof()); } #[test] diff --git a/src/libstd/io/pipe.rs b/src/libstd/io/pipe.rs index 252575ee4454f..2349c64a84b27 100644 --- a/src/libstd/io/pipe.rs +++ b/src/libstd/io/pipe.rs @@ -14,10 +14,9 @@ //! enough so that pipes can be created to child processes. use prelude::*; -use super::{Reader, Writer}; use io::{io_error, EndOfFile}; -use io::native::file; -use rt::rtio::{LocalIo, RtioPipe}; +use libc; +use rt::rtio::{RtioPipe, LocalIo}; pub struct PipeStream { priv obj: ~RtioPipe, @@ -43,15 +42,10 @@ impl PipeStream { /// /// If the pipe cannot be created, an error will be raised on the /// `io_error` condition. 
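A note on the io/option.rs tests above: they pin down the contract of the Option-as-stream adapter, namely that a None writer or reader raises PreviousIoError on the io_error condition instead of failing the task. The writer half plausibly looks like the following sketch, inferred from the tests rather than copied from this patch:

    impl<W: Writer> Writer for Option<W> {
        fn write(&mut self, buf: &[u8]) {
            match *self {
                Some(ref mut w) => w.write(buf),
                None => io_error::cond.raise(standard_error(PreviousIoError))
            }
        }
        fn flush(&mut self) {
            match *self {
                Some(ref mut w) => w.flush(),
                None => io_error::cond.raise(standard_error(PreviousIoError))
            }
        }
    }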
- pub fn open(fd: file::fd_t) -> Option { - let mut io = LocalIo::borrow(); - match io.get().pipe_open(fd) { - Ok(obj) => Some(PipeStream { obj: obj }), - Err(e) => { - io_error::cond.raise(e); - None - } - } + pub fn open(fd: libc::c_int) -> Option { + LocalIo::maybe_raise(|io| { + io.pipe_open(fd).map(|obj| PipeStream { obj: obj }) + }) } pub fn new(inner: ~RtioPipe) -> PipeStream { diff --git a/src/libstd/io/process.rs b/src/libstd/io/process.rs index 001faa1ecaf7b..bbb2a7ef3984d 100644 --- a/src/libstd/io/process.rs +++ b/src/libstd/io/process.rs @@ -119,19 +119,17 @@ impl Process { /// Creates a new pipe initialized, but not bound to any particular /// source/destination pub fn new(config: ProcessConfig) -> Option { - let mut io = LocalIo::borrow(); - match io.get().spawn(config) { - Ok((p, io)) => Some(Process{ - handle: p, - io: io.move_iter().map(|p| - p.map(|p| io::PipeStream::new(p)) - ).collect() - }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + let mut config = Some(config); + LocalIo::maybe_raise(|io| { + io.spawn(config.take_unwrap()).map(|(p, io)| { + Process { + handle: p, + io: io.move_iter().map(|p| { + p.map(|p| io::PipeStream::new(p)) + }).collect() + } + }) + }) } /// Returns the process id of this child process diff --git a/src/libstd/io/signal.rs b/src/libstd/io/signal.rs index 00d84e22c25b2..4cde35796a642 100644 --- a/src/libstd/io/signal.rs +++ b/src/libstd/io/signal.rs @@ -23,8 +23,7 @@ use clone::Clone; use comm::{Port, SharedChan}; use container::{Map, MutableMap}; use hashmap; -use io::io_error; -use result::{Err, Ok}; +use option::{Some, None}; use rt::rtio::{IoFactory, LocalIo, RtioSignal}; #[repr(int)] @@ -122,16 +121,14 @@ impl Listener { if self.handles.contains_key(&signum) { return true; // self is already listening to signum, so succeed } - let mut io = LocalIo::borrow(); - match io.get().signal(signum, self.chan.clone()) { - Ok(w) => { - self.handles.insert(signum, w); + match LocalIo::maybe_raise(|io| { + io.signal(signum, self.chan.clone()) + }) { + Some(handle) => { + self.handles.insert(signum, handle); true - }, - Err(ioerr) => { - io_error::cond.raise(ioerr); - false } + None => false } } diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 41337075aa9b9..1e4fa7968dc42 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -27,13 +27,13 @@ out.write(bytes!("Hello, world!")); */ use fmt; +use io::buffered::LineBufferedWriter; +use io::{Reader, Writer, io_error, IoError, OtherIoError, + standard_error, EndOfFile}; use libc; use option::{Option, Some, None}; use result::{Ok, Err}; -use io::buffered::LineBufferedWriter; use rt::rtio::{DontClose, IoFactory, LocalIo, RtioFileStream, RtioTTY}; -use super::{Reader, Writer, io_error, IoError, OtherIoError, - standard_error, EndOfFile}; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. Our story begins by splitting the world into two @@ -69,19 +69,12 @@ enum StdSource { } fn src(fd: libc::c_int, readable: bool, f: |StdSource| -> T) -> T { - let mut io = LocalIo::borrow(); - match io.get().tty_open(fd, readable) { - Ok(tty) => f(TTY(tty)), - Err(_) => { - // It's not really that desirable if these handles are closed - // synchronously, and because they're squirreled away in a task - // structure the destructors will be run when the task is - // attempted to get destroyed. 
This means that if we run a - // synchronous destructor we'll attempt to do some scheduling - // operations which will just result in sadness. - f(File(io.get().fs_from_raw_fd(fd, DontClose))) - } - } + LocalIo::maybe_raise(|io| { + Ok(match io.tty_open(fd, readable) { + Ok(tty) => f(TTY(tty)), + Err(_) => f(File(io.fs_from_raw_fd(fd, DontClose))), + }) + }).unwrap() } /// Creates a new non-blocking handle to the stdin of the current process. @@ -138,7 +131,17 @@ fn with_task_stdout(f: |&mut Writer|) { } None => { - let mut io = stdout(); + struct Stdout; + impl Writer for Stdout { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDOUT_FILENO, + data.as_ptr() as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut io = Stdout; f(&mut io as &mut Writer); } } @@ -304,23 +307,10 @@ impl Writer for StdWriter { #[cfg(test)] mod tests { - use super::*; - use rt::test::run_in_newsched_task; - - #[test] - fn smoke_uv() { + iotest!(fn smoke() { // Just make sure we can acquire handles stdin(); stdout(); stderr(); - } - - #[test] - fn smoke_native() { - do run_in_newsched_task { - stdin(); - stdout(); - stderr(); - } - } + }) } diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs new file mode 100644 index 0000000000000..4be1122796584 --- /dev/null +++ b/src/libstd/io/test.rs @@ -0,0 +1,195 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[macro_escape]; + +use os; +use prelude::*; +use rand; +use rand::Rng; +use std::io::net::ip::*; +use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed}; + +macro_rules! iotest ( + { fn $name:ident() $b:block } => ( + mod $name { + #[allow(unused_imports)]; + + use super::super::*; + use super::*; + use io; + use prelude::*; + use io::*; + use io::fs::*; + use io::net::tcp::*; + use io::net::ip::*; + use io::net::udp::*; + #[cfg(unix)] + use io::net::unix::*; + use str; + use util; + + fn f() $b + + #[test] fn green() { f() } + #[test] fn native() { + use native; + let (p, c) = Chan::new(); + do native::task::spawn { c.send(f()) } + p.recv(); + } + } + ) +) + +/// Get a port number, starting at 9600, for use in tests +pub fn next_test_port() -> u16 { + static mut next_offset: AtomicUint = INIT_ATOMIC_UINT; + unsafe { + base_port() + next_offset.fetch_add(1, Relaxed) as u16 + } +} + +/// Get a temporary path which could be the location of a unix socket +pub fn next_test_unix() -> Path { + if cfg!(unix) { + os::tmpdir().join(rand::task_rng().gen_ascii_str(20)) + } else { + Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20)) + } +} + +/// Get a unique IPv4 localhost:port pair starting at 9600 +pub fn next_test_ip4() -> SocketAddr { + SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() } +} + +/// Get a unique IPv6 localhost:port pair starting at 9600 +pub fn next_test_ip6() -> SocketAddr { + SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() } +} + +/* +XXX: Welcome to MegaHack City. + +The bots run multiple builds at the same time, and these builds +all want to use ports. This function figures out which workspace +it is running in and assigns a port range based on it. 
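The iotest! macro defined above is the load-bearing piece of this new file: one invocation expands to a module containing two #[test] wrappers around the same body, so every I/O test runs once on the green scheduler driving the test harness and once on a freshly spawned native thread. Usage, as the stdio change later in this patch does:

    iotest!(fn smoke() {
        // compiled once, run as both smoke::green and smoke::native
        stdin();
        stdout();
        stderr();
    })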
+*/ +fn base_port() -> u16 { + + let base = 9600u16; + let range = 1000u16; + + let bases = [ + ("32-opt", base + range * 1), + ("32-noopt", base + range * 2), + ("64-opt", base + range * 3), + ("64-noopt", base + range * 4), + ("64-opt-vg", base + range * 5), + ("all-opt", base + range * 6), + ("snap3", base + range * 7), + ("dist", base + range * 8) + ]; + + // FIXME (#9639): This needs to handle non-utf8 paths + let path = os::getcwd(); + let path_s = path.as_str().unwrap(); + + let mut final_base = base; + + for &(dir, base) in bases.iter() { + if path_s.contains(dir) { + final_base = base; + break; + } + } + + return final_base; +} + +pub fn raise_fd_limit() { + unsafe { darwin_fd_limit::raise_fd_limit() } +} + +#[cfg(target_os="macos")] +#[allow(non_camel_case_types)] +mod darwin_fd_limit { + /*! + * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the + * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low + * for our multithreaded scheduler testing, depending on the number of cores available. + * + * This fixes issue #7772. + */ + + use libc; + type rlim_t = libc::uint64_t; + struct rlimit { + rlim_cur: rlim_t, + rlim_max: rlim_t + } + #[nolink] + extern { + // name probably doesn't need to be mut, but the C function doesn't specify const + fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint, + oldp: *mut libc::c_void, oldlenp: *mut libc::size_t, + newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int; + fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int; + fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int; + } + static CTL_KERN: libc::c_int = 1; + static KERN_MAXFILESPERPROC: libc::c_int = 29; + static RLIMIT_NOFILE: libc::c_int = 8; + + pub unsafe fn raise_fd_limit() { + // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc + // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value. 
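A worked example of the scheme above, assuming a bot whose working directory contains "64-opt" and no other thread calling in between: base_port() yields 9600 + 1000 * 3 = 12600, while a concurrent "32-noopt" build starts at 9600 + 1000 * 2 = 11600, keeping the two ranges 1000 ports apart.

    let a = next_test_port();   // 12600 under the assumptions above
    let b = next_test_port();   // 12601: the relaxed AtomicUint ticks up by one
    assert_eq!((a, b), (12600, 12601));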
+ use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null}; + use mem::size_of_val; + use os::last_os_error; + + // Fetch the kern.maxfilesperproc value + let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC]; + let mut maxfiles: libc::c_int = 0; + let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; + if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2, + to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void, + to_mut_unsafe_ptr(&mut size), + mut_null(), 0) != 0 { + let err = last_os_error(); + error!("raise_fd_limit: error calling sysctl: {}", err); + return; + } + + // Fetch the current resource limits + let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0}; + if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 { + let err = last_os_error(); + error!("raise_fd_limit: error calling getrlimit: {}", err); + return; + } + + // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit + rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max); + + // Set our newly-increased resource limit + if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 { + let err = last_os_error(); + error!("raise_fd_limit: error calling setrlimit: {}", err); + return; + } + } +} + +#[cfg(not(target_os="macos"))] +mod darwin_fd_limit { + pub unsafe fn raise_fd_limit() {} +} diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs index 9d4a72509e7fa..7c9aa28bfe9a8 100644 --- a/src/libstd/io/timer.rs +++ b/src/libstd/io/timer.rs @@ -39,9 +39,7 @@ loop { */ use comm::Port; -use option::{Option, Some, None}; -use result::{Ok, Err}; -use io::io_error; +use option::Option; use rt::rtio::{IoFactory, LocalIo, RtioTimer}; pub struct Timer { @@ -60,15 +58,7 @@ impl Timer { /// for a number of milliseconds, or to possibly create channels which will /// get notified after an amount of time has passed. pub fn new() -> Option { - let mut io = LocalIo::borrow(); - match io.get().timer_init() { - Ok(t) => Some(Timer { obj: t }), - Err(ioerr) => { - debug!("Timer::init: failed to init: {:?}", ioerr); - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t })) } /// Blocks the current task for `msecs` milliseconds. 
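The timer tests that follow encode a subtle contract: a Timer owns a single underlying timer, so re-arming it — a second oneshot, a sleep, or a periodic — invalidates any Port handed out earlier. Distilled from oneshot_twice below:

    let mut timer = Timer::new().unwrap();
    let first = timer.oneshot(10000);
    let port = timer.oneshot(1);        // re-arms the timer: `first` is dead
    port.recv();
    assert_eq!(first.try_recv(), None);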
@@ -108,77 +98,60 @@ impl Timer { mod test { use prelude::*; use super::*; - use rt::test::*; #[test] fn test_io_timer_sleep_simple() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.sleep(1); - } + let mut timer = Timer::new().unwrap(); + timer.sleep(1); } #[test] fn test_io_timer_sleep_oneshot() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.oneshot(1).recv(); - } + let mut timer = Timer::new().unwrap(); + timer.oneshot(1).recv(); } #[test] fn test_io_timer_sleep_oneshot_forget() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.oneshot(100000000000); - } + let mut timer = Timer::new().unwrap(); + timer.oneshot(100000000000); } #[test] fn oneshot_twice() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port1 = timer.oneshot(10000); - let port = timer.oneshot(1); - port.recv(); - assert_eq!(port1.try_recv(), None); - } + let mut timer = Timer::new().unwrap(); + let port1 = timer.oneshot(10000); + let port = timer.oneshot(1); + port.recv(); + assert_eq!(port1.try_recv(), None); } #[test] fn test_io_timer_oneshot_then_sleep() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port = timer.oneshot(100000000000); - timer.sleep(1); // this should invalidate the port + let mut timer = Timer::new().unwrap(); + let port = timer.oneshot(100000000000); + timer.sleep(1); // this should invalidate the port - assert_eq!(port.try_recv(), None); - } + assert_eq!(port.try_recv(), None); } #[test] fn test_io_timer_sleep_periodic() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port = timer.periodic(1); - port.recv(); - port.recv(); - port.recv(); - } + let mut timer = Timer::new().unwrap(); + let port = timer.periodic(1); + port.recv(); + port.recv(); + port.recv(); } #[test] fn test_io_timer_sleep_periodic_forget() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.periodic(100000000000); - } + let mut timer = Timer::new().unwrap(); + timer.periodic(100000000000); } #[test] fn test_io_timer_sleep_standalone() { - do run_in_mt_newsched_task { - sleep(1) - } + sleep(1) } } diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index b2b856c5c83c8..4f633a63babc4 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -65,13 +65,15 @@ // When testing libstd, bring in libuv as the I/O backend so tests can print // things and all of the std::io tests have an I/O interface to run on top // of -#[cfg(test)] extern mod rustuv = "rustuv#0.9-pre"; +#[cfg(test)] extern mod rustuv = "rustuv"; +#[cfg(test)] extern mod native = "native"; +#[cfg(test)] extern mod green = "green"; // Make extra accessible for benchmarking -#[cfg(test)] extern mod extra = "extra#0.9-pre"; +#[cfg(test)] extern mod extra = "extra"; // Make std testable by not duplicating lang items. 
See #2912 -#[cfg(test)] extern mod realstd = "std#0.9-pre"; +#[cfg(test)] extern mod realstd = "std"; #[cfg(test)] pub use kinds = realstd::kinds; #[cfg(test)] pub use ops = realstd::ops; #[cfg(test)] pub use cmp = realstd::cmp; @@ -159,6 +161,7 @@ pub mod trie; pub mod task; pub mod comm; pub mod local_data; +pub mod sync; /* Runtime and platform support */ diff --git a/src/libstd/local_data.rs b/src/libstd/local_data.rs index 652aa4d8198a2..d7e11d2f3a70f 100644 --- a/src/libstd/local_data.rs +++ b/src/libstd/local_data.rs @@ -432,6 +432,7 @@ mod tests { } #[test] + #[allow(dead_code)] fn test_tls_overwrite_multiple_types() { static str_key: Key<~str> = &Key; static box_key: Key<@()> = &Key; diff --git a/src/libstd/logging.rs b/src/libstd/logging.rs index dbe8b3247c0b4..fb83cfdd6ea8a 100644 --- a/src/libstd/logging.rs +++ b/src/libstd/logging.rs @@ -118,26 +118,16 @@ pub static ERROR: u32 = 1; /// It is not recommended to call this function directly, rather it should be /// invoked through the logging family of macros. pub fn log(_level: u32, args: &fmt::Arguments) { - unsafe { - let optional_task: Option<*mut Task> = Local::try_unsafe_borrow(); - match optional_task { - Some(local) => { - // Lazily initialize the local task's logger - match (*local).logger { - // Use the available logger if we have one - Some(ref mut logger) => { logger.log(args); } - None => { - let mut logger = StdErrLogger::new(); - logger.log(args); - (*local).logger = Some(logger); - } - } - } - // If there's no local task, then always log to stderr - None => { - let mut logger = StdErrLogger::new(); - logger.log(args); - } - } + let mut logger = { + let mut task = Local::borrow(None::); + task.get().logger.take() + }; + + if logger.is_none() { + logger = Some(StdErrLogger::new()); } + logger.get_mut_ref().log(args); + + let mut task = Local::borrow(None::); + task.get().logger = logger; } diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 8da7c0340f7fe..8f2f219088504 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -28,8 +28,6 @@ #[allow(missing_doc)]; -#[cfg(unix)] -use c_str::CString; use clone::Clone; use container::Container; #[cfg(target_os = "macos")] @@ -43,8 +41,7 @@ use ptr; use str; use to_str; use unstable::finally::Finally; - -pub use os::consts::*; +use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst}; /// Delegates to the libc close() function, returning the same return value. pub fn close(fd: c_int) -> c_int { @@ -58,6 +55,8 @@ static BUF_BYTES : uint = 2048u; #[cfg(unix)] pub fn getcwd() -> Path { + use c_str::CString; + let mut buf = [0 as libc::c_char, ..BUF_BYTES]; unsafe { if libc::getcwd(buf.as_mut_ptr(), buf.len() as size_t).is_null() { @@ -333,7 +332,7 @@ pub fn pipe() -> Pipe { /// Returns the proper dll filename for the given basename of a file. pub fn dll_filename(base: &str) -> ~str { - format!("{}{}{}", DLL_PREFIX, base, DLL_SUFFIX) + format!("{}{}{}", consts::DLL_PREFIX, base, consts::DLL_SUFFIX) } /// Optionally returns the filesystem path to the current executable which is @@ -675,17 +674,26 @@ pub fn last_os_error() -> ~str { strerror() } +static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT; + /** * Sets the process exit code * * Sets the exit code returned by the process if all supervised tasks * terminate successfully (without failing). 
If the current root task fails * and is supervised by the scheduler then any user-specified exit status is - * ignored and the process exits with the default failure status + * ignored and the process exits with the default failure status. + * + * Note that this is not synchronized against modifications of other threads. */ pub fn set_exit_status(code: int) { - use rt; - rt::set_exit_status(code); + unsafe { EXIT_STATUS.store(code, SeqCst) } +} + +/// Fetches the process's current exit code. This defaults to 0 and can change +/// by calling `set_exit_status`. +pub fn get_exit_status() -> int { + unsafe { EXIT_STATUS.load(SeqCst) } } #[cfg(target_os = "macos")] diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs index 423981d9e9181..d1e97cb6ec0f8 100644 --- a/src/libstd/rt/borrowck.rs +++ b/src/libstd/rt/borrowck.rs @@ -12,9 +12,8 @@ use c_str::{ToCStr, CString}; use libc::{c_char, size_t}; use option::{Option, None, Some}; use ptr::RawPtr; -use rt::env; +use rt; use rt::local::Local; -use rt::task; use rt::task::Task; use str::OwnedStr; use str; @@ -62,7 +61,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) match try_take_task_borrow_list() { None => { // not recording borrows let msg = "borrowed"; - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } Some(borrow_list) => { // recording borrows let mut msg = ~"borrowed"; @@ -76,7 +75,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) sep = " and at "; } } - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } } } @@ -95,7 +94,7 @@ unsafe fn debug_borrow>(tag: &'static str, //! A useful debugging function that prints a pointer + tag + newline //! without allocating memory. - if ENABLE_DEBUG && env::debug_borrow() { + if ENABLE_DEBUG && rt::env::debug_borrow() { debug_borrow_slow(tag, p, old_bits, new_bits, filename, line); } @@ -180,7 +179,7 @@ pub unsafe fn unrecord_borrow(a: *u8, if br.alloc != a || br.file != file || br.line != line { let err = format!("wrong borrow found, br={:?}", br); err.with_c_str(|msg_p| { - task::begin_unwind_raw(msg_p, file, line) + rt::begin_unwind_raw(msg_p, file, line) }) } borrow_list diff --git a/src/libstd/rt/crate_map.rs b/src/libstd/rt/crate_map.rs index 22fc3f0ab56c0..d9b40cfbb6e8c 100644 --- a/src/libstd/rt/crate_map.rs +++ b/src/libstd/rt/crate_map.rs @@ -30,7 +30,7 @@ pub struct CrateMap<'a> { version: i32, entries: &'a [ModEntry<'a>], children: &'a [&'a CrateMap<'a>], - event_loop_factory: Option ~EventLoop>, + event_loop_factory: Option ~EventLoop>, } #[cfg(not(windows))] diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs index d1bd450afe275..f3fa482b18cca 100644 --- a/src/libstd/rt/env.rs +++ b/src/libstd/rt/env.rs @@ -17,7 +17,7 @@ use os; // Note that these are all accessed without any synchronization. // They are expected to be initialized once then left alone. -static mut MIN_STACK: uint = 2000000; +static mut MIN_STACK: uint = 2 * 1024 * 1024; static mut DEBUG_BORROW: bool = false; static mut POISON_ON_FREE: bool = false; diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs deleted file mode 100644 index f4f128cf5aac1..0000000000000 --- a/src/libstd/rt/kill.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - -Task death: asynchronous killing, linked failure, exit code propagation. - -This file implements two orthogonal building-blocks for communicating failure -between tasks. One is 'linked failure' or 'task killing', that is, a failing -task causing other tasks to fail promptly (even those that are blocked on -pipes or I/O). The other is 'exit code propagation', which affects the result -observed by the parent of a task::try task that itself spawns child tasks -(such as any #[test] function). In both cases the data structures live in -KillHandle. - - -I. Task killing. - -The model for killing involves two atomic flags, the "kill flag" and the -"unkillable flag". Operations on the kill flag include: - -- In the taskgroup code (task/spawn.rs), tasks store a clone of their - KillHandle in their shared taskgroup. Another task in the group that fails - will use that handle to call kill(). -- When a task blocks, it turns its ~Task into a BlockedTask by storing a - the transmuted ~Task pointer inside the KillHandle's kill flag. A task - trying to block and a task trying to kill it can simultaneously access the - kill flag, after which the task will get scheduled and fail (no matter who - wins the race). Likewise, a task trying to wake a blocked task normally and - a task trying to kill it can simultaneously access the flag; only one will - get the task to reschedule it. - -Operations on the unkillable flag include: - -- When a task becomes unkillable, it swaps on the flag to forbid any killer - from waking it up while it's blocked inside the unkillable section. If a - kill was already pending, the task fails instead of becoming unkillable. -- When a task is done being unkillable, it restores the flag to the normal - running state. If a kill was received-but-blocked during the unkillable - section, the task fails at this later point. -- When a task tries to kill another task, before swapping on the kill flag, it - first swaps on the unkillable flag, to see if it's "allowed" to wake up the - task. If it isn't, the killed task will receive the signal when it becomes - killable again. (Of course, a task trying to wake the task normally (e.g. - sending on a channel) does not access the unkillable flag at all.) - -Why do we not need acquire/release barriers on any of the kill flag swaps? -This is because barriers establish orderings between accesses on different -memory locations, but each kill-related operation is only a swap on a single -location, so atomicity is all that matters. The exception is kill(), which -does a swap on both flags in sequence. kill() needs no barriers because it -does not matter if its two accesses are seen reordered on another CPU: if a -killer does perform both writes, it means it saw a KILL_RUNNING in the -unkillable flag, which means an unkillable task will see KILL_KILLED and fail -immediately (rendering the subsequent write to the kill flag unnecessary). - - -II. Exit code propagation. - -The basic model for exit code propagation, which is used with the "watched" -spawn mode (on by default for linked spawns, off for supervised and unlinked -spawns), is that a parent will wait for all its watched children to exit -before reporting whether it succeeded or failed. 
A watching parent will only -report success if it succeeded and all its children also reported success; -otherwise, it will report failure. This is most useful for writing test cases: - - ``` -#[test] -fn test_something_in_another_task { - do spawn { - assert!(collatz_conjecture_is_false()); - } -} - ``` - -Here, as the child task will certainly outlive the parent task, we might miss -the failure of the child when deciding whether or not the test case passed. -The watched spawn mode avoids this problem. - -In order to propagate exit codes from children to their parents, any -'watching' parent must wait for all of its children to exit before it can -report its final exit status. We achieve this by using an UnsafeArc, using the -reference counting to track how many children are still alive, and using the -unwrap() operation in the parent's exit path to wait for all children to exit. -The UnsafeArc referred to here is actually the KillHandle itself. - -This also works transitively, as if a "middle" watched child task is itself -watching a grandchild task, the "middle" task will do unwrap() on its own -KillHandle (thereby waiting for the grandchild to exit) before dropping its -reference to its watching parent (which will alert the parent). - -While UnsafeArc::unwrap() accomplishes the synchronization, there remains the -matter of reporting the exit codes themselves. This is easiest when an exiting -watched task has no watched children of its own: - -- If the task with no watched children exits successfully, it need do nothing. -- If the task with no watched children has failed, it sets a flag in the - parent's KillHandle ("any_child_failed") to false. It then stays false forever. - -However, if a "middle" watched task with watched children of its own exits -before its child exits, we need to ensure that the grandparent task may still -see a failure from the grandchild task. While we could achieve this by having -each intermediate task block on its handle, this keeps around the other resources -the task was using. To be more efficient, this is accomplished via "tombstones". - -A tombstone is a closure, proc() -> bool, which will perform any waiting necessary -to collect the exit code of descendant tasks. In its environment is captured -the KillHandle of whichever task created the tombstone, and perhaps also any -tombstones that that task itself had, and finally also another tombstone, -effectively creating a lazy-list of heap closures. - -When a child wishes to exit early and leave tombstones behind for its parent, -it must use a LittleLock (pthread mutex) to synchronize with any possible -sibling tasks which are trying to do the same thing with the same parent. -However, on the other side, when the parent is ready to pull on the tombstones, -it need not use this lock, because the unwrap() serves as a barrier that ensures -no children will remain with references to the handle. - -The main logic for creating and assigning tombstones can be found in the -function reparent_children_to() in the impl for KillHandle. - - -IIA. Issues with exit code propagation. - -There are two known issues with the current scheme for exit code propagation. - -- As documented in issue #8136, the structure mandates the possibility for stack - overflow when collecting tombstones that are very deeply nested. This cannot - be avoided with the closure representation, as tombstones end up structured in - a sort of tree. 
However, notably, the tombstones do not actually need to be - collected in any particular order, and so a doubly-linked list may be used. - However we do not do this yet because DList is in libextra. - -- A discussion with Graydon made me realize that if we decoupled the exit code - propagation from the parents-waiting action, this could result in a simpler - implementation as the exit codes themselves would not have to be propagated, - and could instead be propagated implicitly through the taskgroup mechanism - that we already have. The tombstoning scheme would still be required. I have - not implemented this because currently we can't receive a linked failure kill - signal during the task cleanup activity, as that is currently "unkillable", - and occurs outside the task's unwinder's "try" block, so would require some - restructuring. - -*/ - -use cast; -use option::{Option, Some, None}; -use prelude::*; -use iter; -use task::TaskResult; -use rt::task::Task; -use unstable::atomics::{AtomicUint, SeqCst}; -use unstable::sync::UnsafeArc; - -/// A handle to a blocked task. Usually this means having the ~Task pointer by -/// ownership, but if the task is killable, a killer can steal it at any time. -pub enum BlockedTask { - Owned(~Task), - Shared(UnsafeArc), -} - -/// Per-task state related to task death, killing, failure, etc. -pub struct Death { - // Action to be done with the exit code. If set, also makes the task wait - // until all its watched children exit before collecting the status. - on_exit: Option, - // nesting level counter for unstable::atomically calls (0 == can deschedule). - priv wont_sleep: int, -} - -pub struct BlockedTaskIterator { - priv inner: UnsafeArc, -} - -impl Iterator for BlockedTaskIterator { - fn next(&mut self) -> Option { - Some(Shared(self.inner.clone())) - } -} - -impl BlockedTask { - /// Returns Some if the task was successfully woken; None if already killed. - pub fn wake(self) -> Option<~Task> { - match self { - Owned(task) => Some(task), - Shared(arc) => unsafe { - match (*arc.get()).swap(0, SeqCst) { - 0 => None, - n => cast::transmute(n), - } - } - } - } - - /// Create a blocked task, unless the task was already killed. - pub fn block(task: ~Task) -> BlockedTask { - Owned(task) - } - - /// Converts one blocked task handle to a list of many handles to the same. - pub fn make_selectable(self, num_handles: uint) - -> iter::Take - { - let arc = match self { - Owned(task) => { - let flag = unsafe { AtomicUint::new(cast::transmute(task)) }; - UnsafeArc::new(flag) - } - Shared(arc) => arc.clone(), - }; - BlockedTaskIterator{ inner: arc }.take(num_handles) - } - - // This assertion has two flavours because the wake involves an atomic op. - // In the faster version, destructors will fail dramatically instead. - #[inline] #[cfg(not(test))] - pub fn assert_already_awake(self) { } - #[inline] #[cfg(test)] - pub fn assert_already_awake(self) { assert!(self.wake().is_none()); } - - /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag. - #[inline] - pub unsafe fn cast_to_uint(self) -> uint { - match self { - Owned(task) => { - let blocked_task_ptr: uint = cast::transmute(task); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr - } - Shared(arc) => { - let blocked_task_ptr: uint = cast::transmute(~arc); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr | 0x1 - } - } - } - - /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag. 
-
-impl Death {
-    pub fn new() -> Death {
-        Death {
-            on_exit: None,
-            wont_sleep: 0,
-        }
-    }
-
-    /// Collect failure exit codes from children and propagate them to a parent.
-    pub fn collect_failure(&mut self, result: TaskResult) {
-        match self.on_exit.take() {
-            Some(f) => f(result),
-            None => {}
-        }
-    }
-
-    /// Enter a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a subsequent call to allow_deschedule.
-    #[inline]
-    pub fn inhibit_deschedule(&mut self) {
-        self.wont_sleep += 1;
-    }
-
-    /// Exit a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a preceding call to inhibit_deschedule.
-    #[inline]
-    pub fn allow_deschedule(&mut self) {
-        rtassert!(self.wont_sleep != 0);
-        self.wont_sleep -= 1;
-    }
-
-    /// Ensure that the task is allowed to become descheduled.
-    #[inline]
-    pub fn assert_may_sleep(&self) {
-        if self.wont_sleep != 0 {
-            rtabort!("illegal atomic-sleep: attempt to reschedule while \
-                      using an Exclusive or LittleLock");
-        }
-    }
-}
-
-impl Drop for Death {
-    fn drop(&mut self) {
-        // Mustn't be in an atomic or unkillable section at task death.
-        rtassert!(self.wont_sleep == 0);
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use rt::test::*;
-    use super::*;
-
-    // Task blocking tests
-
-    #[test]
-    fn block_and_wake() {
-        do with_test_task |task| {
-            BlockedTask::block(task).wake().unwrap()
-        }
-    }
-}
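The `on_exit` proc stored in `Death` is how a task's final status reaches an observer. A hedged sketch of the wiring, using the era's `Chan` API (not part of the patch; the deleted test helpers later in this diff do essentially the same thing):

```
// A parent arranges to hear about a task's result via the on_exit proc.
fn observe_result() {
    let (port, chan) = Chan::new();
    let mut death = Death::new();
    death.on_exit = Some(proc(result: TaskResult) {
        chan.send(result.is_ok());
    });
    death.collect_failure(Ok(()));  // what task teardown calls
    assert!(port.recv());
}
```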
diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs
index d73ad98a25b37..1c04b6b43ce77 100644
--- a/src/libstd/rt/local.rs
+++ b/src/libstd/rt/local.rs
@@ -8,8 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use option::{Option, Some, None};
-use rt::sched::Scheduler;
+use option::Option;
 use rt::task::Task;
 use rt::local_ptr;
 
@@ -46,87 +45,10 @@ impl Local<local_ptr::Borrowed<Task>> for Task {
     }
 }
 
-/// Encapsulates a temporarily-borrowed scheduler.
-pub struct BorrowedScheduler {
-    priv task: local_ptr::Borrowed<Task>,
-}
-
-impl BorrowedScheduler {
-    fn new(mut task: local_ptr::Borrowed<Task>) -> BorrowedScheduler {
-        if task.get().sched.is_none() {
-            rtabort!("no scheduler")
-        } else {
-            BorrowedScheduler {
-                task: task,
-            }
-        }
-    }
-
-    #[inline]
-    pub fn get<'a>(&'a mut self) -> &'a mut ~Scheduler {
-        match self.task.get().sched {
-            None => rtabort!("no scheduler"),
-            Some(ref mut sched) => sched,
-        }
-    }
-}
-
-impl Local<BorrowedScheduler> for Scheduler {
-    fn put(value: ~Scheduler) {
-        let mut task = Local::borrow(None::<Task>);
-        task.get().sched = Some(value);
-    }
-    #[inline]
-    fn take() -> ~Scheduler {
-        unsafe {
-            // XXX: Unsafe for speed
-            let task: *mut Task = Local::unsafe_borrow();
-            (*task).sched.take_unwrap()
-        }
-    }
-    fn exists(_: Option<Scheduler>) -> bool {
-        let mut task = Local::borrow(None::<Task>);
-        task.get().sched.is_some()
-    }
-    #[inline]
-    fn borrow(_: Option<Scheduler>) -> BorrowedScheduler {
-        BorrowedScheduler::new(Local::borrow(None::<Task>))
-    }
-    unsafe fn unsafe_take() -> ~Scheduler { rtabort!("unimpl") }
-    unsafe fn unsafe_borrow() -> *mut Scheduler {
-        let task: *mut Task = Local::unsafe_borrow();
-        match (*task).sched {
-            Some(~ref mut sched) => {
-                let s: *mut Scheduler = &mut *sched;
-                return s;
-            }
-            None => {
-                rtabort!("no scheduler")
-            }
-        }
-    }
-    unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> {
-        let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
-        match task_opt {
-            Some(task) => {
-                match (*task).sched {
-                    Some(~ref mut sched) => {
-                        let s: *mut Scheduler = &mut *sched;
-                        Some(s)
-                    }
-                    None => None
-                }
-            }
-            None => None
-        }
-    }
-}
-
 #[cfg(test)]
 mod test {
     use option::None;
     use unstable::run_in_bare_thread;
-    use rt::test::*;
     use super::*;
     use rt::task::Task;
     use rt::local_ptr;
@@ -135,8 +57,7 @@ mod test {
     fn thread_local_task_smoke_test() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
@@ -147,12 +68,11 @@ mod test {
     fn thread_local_task_two_instances() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
@@ -164,8 +84,7 @@ mod test {
     fn borrow_smoke_test() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
 
             unsafe {
@@ -180,8 +99,7 @@ mod test {
     fn borrow_with_return() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
 
             {
@@ -193,5 +111,9 @@ mod test {
         }
     }
 
+    fn cleanup_task(mut t: ~Task) {
+        t.destroyed = true;
+    }
+
 }
diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs
index 925aa802ad5c2..42cce272e4430 100644
--- a/src/libstd/rt/local_ptr.rs
+++ b/src/libstd/rt/local_ptr.rs
@@ -42,7 +42,7 @@ impl<T> Drop for Borrowed<T> {
         }
         let val: ~T = cast::transmute(self.val);
         put::<T>(val);
-        assert!(exists());
+        rtassert!(exists());
     }
 }
 
@@ -109,7 +109,9 @@ pub mod compiled {
 /// Does not validate the pointer type.
 #[inline]
 pub unsafe fn take<T>() -> ~T {
-    let ptr: ~T = cast::transmute(RT_TLS_PTR);
+    let ptr = RT_TLS_PTR;
+    rtassert!(!ptr.is_null());
+    let ptr: ~T = cast::transmute(ptr);
     // can't use `as`, due to type not matching with `cfg(test)`
     RT_TLS_PTR = cast::transmute(0);
     ptr
@@ -178,7 +180,7 @@ pub mod native {
     }
 
     pub unsafe fn cleanup() {
-        assert!(INITIALIZED);
+        rtassert!(INITIALIZED);
         tls::destroy(RT_TLS_KEY);
         LOCK.destroy();
         INITIALIZED = false;
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index df1ebeb6407aa..0dd6c883d5b47 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -57,27 +57,17 @@ Several modules in `core` are clients of `rt`:
 // XXX: this should not be here.
 #[allow(missing_doc)];
 
+use any::Any;
 use clone::Clone;
 use container::Container;
 use iter::Iterator;
-use option::{Option, None, Some};
+use option::Option;
 use ptr::RawPtr;
-use rt::local::Local;
-use rt::sched::{Scheduler, Shutdown};
-use rt::sleeper_list::SleeperList;
-use task::TaskResult;
-use rt::task::{Task, SchedTask, GreenTask, Sched};
-use send_str::SendStrStatic;
-use unstable::atomics::{AtomicInt, AtomicBool, SeqCst};
-use unstable::sync::UnsafeArc;
+use result::Result;
+use task::TaskOpts;
 use vec::{OwnedVector, MutableVector, ImmutableVector};
-use vec;
-use self::thread::Thread;
-
-// the os module needs to reach into this helper, so allow general access
-// through this reexport.
-pub use self::util::set_exit_status;
+use self::task::{Task, BlockedTask};
 
 // this is somewhat useful when a program wants to spawn a "reasonable" number
 // of workers based on the constraints of the system that it's running on.
@@ -85,8 +75,8 @@ pub use self::util::set_exit_status;
 // method...
 pub use self::util::default_sched_threads;
 
-// Re-export of the functionality in the kill module
-pub use self::kill::BlockedTask;
+// Export unwinding facilities used by the failure macros
+pub use self::unwind::{begin_unwind, begin_unwind_raw};
 
 // XXX: these probably shouldn't be public...
 #[doc(hidden)]
@@ -99,21 +89,12 @@ pub mod shouldnt_be_public {
 // Internal macros used by the runtime.
 mod macros;
 
-/// Basic implementation of an EventLoop, provides no I/O interfaces
-mod basic;
-
 /// The global (exchange) heap.
 pub mod global_heap;
 
 /// Implementations of language-critical runtime features like @.
 pub mod task;
 
-/// Facilities related to task failure, killing, and death.
-mod kill;
-
-/// The coroutine task scheduler, built on the `io` event loop.
-pub mod sched;
-
 /// The EventLoop and internal synchronous I/O interface.
 pub mod rtio;
 
@@ -121,27 +102,6 @@ pub mod rtio;
 /// or task-local storage.
 pub mod local;
 
-/// A mostly lock-free multi-producer, single consumer queue.
-pub mod mpsc_queue;
-
-/// A lock-free single-producer, single consumer queue.
-pub mod spsc_queue;
-
-/// A lock-free multi-producer, multi-consumer bounded queue.
-mod mpmc_bounded_queue;
-
-/// A parallel work-stealing deque
-pub mod deque;
-
-/// A parallel data structure for tracking sleeping schedulers.
-pub mod sleeper_list;
-
-/// Stack segments and caching.
-pub mod stack;
-
-/// CPU context swapping.
-mod context;
-
 /// Bindings to system threading libraries.
 pub mod thread;
@@ -157,16 +117,6 @@ pub mod logging;
 /// Crate map
 pub mod crate_map;
 
-/// Tools for testing the runtime
-pub mod test;
-
-/// Reference counting
-pub mod rc;
-
-/// A simple single-threaded channel type for passing buffered data between
-/// scheduler and task context
-pub mod tube;
-
 /// The runtime needs to be able to put a pointer into thread-local storage.
 mod local_ptr;
 
@@ -185,41 +135,33 @@ pub mod args;
 // Support for dynamic borrowck
 pub mod borrowck;
 
-/// Set up a default runtime configuration, given compiler-supplied arguments.
-///
-/// This is invoked by the `start` _language item_ (unstable::lang) to
-/// run a Rust executable.
-///
-/// # Arguments
-///
-/// * `argc` & `argv` - The argument vector. On Unix this information is used
-///   by os::args.
-///
-/// # Return value
-///
-/// The return value is used as the process return code. 0 on success, 101 on error.
-pub fn start(argc: int, argv: **u8, main: proc()) -> int {
-
-    init(argc, argv);
-    let exit_code = run(main);
-    // unsafe is ok b/c we're sure that the runtime is gone
-    unsafe { cleanup(); }
-
-    return exit_code;
-}
+/// The default error code of the rust runtime if the main task fails instead
+/// of exiting cleanly.
+pub static DEFAULT_ERROR_CODE: int = 101;
 
-/// Like `start` but creates an additional scheduler on the current thread,
-/// which in most cases will be the 'main' thread, and pins the main task to it.
+/// The interface to the current runtime.
 ///
-/// This is appropriate for running code that must execute on the main thread,
-/// such as the platform event loop and GUI.
-pub fn start_on_main_thread(argc: int, argv: **u8, main: proc()) -> int {
-    init(argc, argv);
-    let exit_code = run_on_main_thread(main);
-    // unsafe is ok b/c we're sure that the runtime is gone
-    unsafe { cleanup(); }
-
-    return exit_code;
+/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
+/// two independent crates, libnative and libgreen, both have objects which
+/// implement this trait. The goal of this trait is to encompass all the
+/// fundamental differences in functionality between the 1:1 and M:N runtime
+/// modes.
+pub trait Runtime {
+    // Necessary scheduling functions, used for channels and blocking I/O
+    // (sometimes).
+    fn yield_now(~self, cur_task: ~Task);
+    fn maybe_yield(~self, cur_task: ~Task);
+    fn deschedule(~self, times: uint, cur_task: ~Task,
+                  f: |BlockedTask| -> Result<(), BlockedTask>);
+    fn reawaken(~self, to_wake: ~Task, can_resched: bool);
+
+    // Miscellaneous calls which are very different depending on what context
+    // you're in.
+    fn spawn_sibling(~self, cur_task: ~Task, opts: TaskOpts, f: proc());
+    fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>>;
+
+    // XXX: This is a serious code smell and this should not exist at all.
+    fn wrap(~self) -> ~Any;
 }
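The trait's odd-looking `~self`/`cur_task` signatures implement a double dispatch: the task owns its runtime object, and each operation first removes the object from the task so it can consume the task by value. A miniature, hypothetical model of that ownership dance (all names invented, not the real libnative/libgreen code):

```
trait Ops {
    fn yield_now(~self, cur: ~Ctx);
}

struct Ctx { ops: Option<~Ops> }

struct NoopOps;
impl Ops for NoopOps {
    fn yield_now(~self, mut cur: ~Ctx) {
        // a real runtime would deschedule here, then restore itself
        cur.ops = Some(self as ~Ops);
    }
}

fn demo() {
    let mut ctx = ~Ctx { ops: Some(~NoopOps as ~Ops) };
    let ops = ctx.ops.take_unwrap();
    ops.yield_now(ctx); // ctx is consumed and handed back to the runtime
}
```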
 /// One-time runtime initialization.
@@ -234,6 +176,7 @@ pub fn init(argc: int, argv: **u8) {
         args::init(argc, argv);
         env::init();
         logging::init();
+        local_ptr::init();
     }
 }
@@ -250,239 +193,3 @@ pub unsafe fn cleanup() {
     args::cleanup();
     local_ptr::cleanup();
 }
-
-/// Execute the main function in a scheduler.
-///
-/// Configures the runtime according to the environment, by default
-/// using a task scheduler with the same number of threads as cores.
-/// Returns a process exit code.
-pub fn run(main: proc()) -> int {
-    run_(main, false)
-}
-
-pub fn run_on_main_thread(main: proc()) -> int {
-    run_(main, true)
-}
-
-fn run_(main: proc(), use_main_sched: bool) -> int {
-    static DEFAULT_ERROR_CODE: int = 101;
-
-    let nscheds = util::default_sched_threads();
-
-    let mut main = Some(main);
-
-    // The shared list of sleeping schedulers.
-    let sleepers = SleeperList::new();
-
-    // Create a work queue for each scheduler, ntimes. Create an extra
-    // for the main thread if that flag is set. We won't steal from it.
-    let mut pool = deque::BufferPool::new();
-    let arr = vec::from_fn(nscheds, |_| pool.deque());
-    let (workers, stealers) = vec::unzip(arr.move_iter());
-
-    // The schedulers.
-    let mut scheds = ~[];
-    // Handles to the schedulers. When the main task ends these will be
-    // sent the Shutdown message to terminate the schedulers.
-    let mut handles = ~[];
-
-    for worker in workers.move_iter() {
-        rtdebug!("inserting a regular scheduler");
-
-        // Every scheduler is driven by an I/O event loop.
-        let loop_ = new_event_loop();
-        let mut sched = ~Scheduler::new(loop_,
-                                        worker,
-                                        stealers.clone(),
-                                        sleepers.clone());
-        let handle = sched.make_handle();
-
-        scheds.push(sched);
-        handles.push(handle);
-    }
-
-    // If we need a main-thread task then create a main thread scheduler
-    // that will reject any task that isn't pinned to it
-    let main_sched = if use_main_sched {
-
-        // Create a friend handle.
-        let mut friend_sched = scheds.pop();
-        let friend_handle = friend_sched.make_handle();
-        scheds.push(friend_sched);
-
-        // This scheduler needs a queue that isn't part of the stealee
-        // set.
-        let (worker, _) = pool.deque();
-
-        let main_loop = new_event_loop();
-        let mut main_sched = ~Scheduler::new_special(main_loop,
-                                                     worker,
-                                                     stealers.clone(),
-                                                     sleepers.clone(),
-                                                     false,
-                                                     Some(friend_handle));
-        let mut main_handle = main_sched.make_handle();
-        // Allow the scheduler to exit when the main task exits.
-        // Note: sending the shutdown message also prevents the scheduler
-        // from pushing itself to the sleeper list, which is used for
-        // waking up schedulers for work stealing; since this is a
-        // non-work-stealing scheduler it should not be adding itself
-        // to the list.
-        main_handle.send(Shutdown);
-        Some(main_sched)
-    } else {
-        None
-    };
-
-    // Create a shared cell for transmitting the process exit
-    // code from the main task to this function.
-    let exit_code = UnsafeArc::new(AtomicInt::new(0));
-    let exit_code_clone = exit_code.clone();
-
-    // Used to sanity check that the runtime only exits once
-    let exited_already = UnsafeArc::new(AtomicBool::new(false));
-
-    // When the main task exits, after all the tasks in the main
-    // task tree, shut down the schedulers and set the exit code.
-    let handles = handles;
-    let on_exit: proc(TaskResult) = proc(exit_success) {
-        unsafe {
-            assert!(!(*exited_already.get()).swap(true, SeqCst),
-                    "the runtime already exited");
-        }
-
-        let mut handles = handles;
-        for handle in handles.mut_iter() {
-            handle.send(Shutdown);
-        }
-
-        unsafe {
-            let exit_code = if exit_success.is_ok() {
-                use rt::util;
-
-                // If we're exiting successfully, then return the global
-                // exit status, which can be set programmatically.
-                util::get_exit_status()
-            } else {
-                DEFAULT_ERROR_CODE
-            };
-            (*exit_code_clone.get()).store(exit_code, SeqCst);
-        }
-    };
-
-    let mut threads = ~[];
-    let mut on_exit = Some(on_exit);
-
-    if !use_main_sched {
-
-        // In the case where we do not use a main_thread scheduler we
-        // run the main task in one of our threads.
-
-        let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
-                                            None,
-                                            ::util::replace(&mut main,
-                                                            None).unwrap());
-        main_task.name = Some(SendStrStatic("<main>"));
")); - main_task.death.on_exit = ::util::replace(&mut on_exit, None); - - let sched = scheds.pop(); - let main_task = main_task; - let thread = do Thread::start { - sched.bootstrap(main_task); - }; - threads.push(thread); - } - - // Run each remaining scheduler in a thread. - for sched in scheds.move_rev_iter() { - rtdebug!("creating regular schedulers"); - let thread = do Thread::start { - let mut sched = sched; - let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || { - rtdebug!("boostraping a non-primary scheduler"); - }; - sched.bootstrap(bootstrap_task); - }; - threads.push(thread); - } - - // If we do have a main thread scheduler, run it now. - - if use_main_sched { - rtdebug!("about to create the main scheduler task"); - - let mut main_sched = main_sched.unwrap(); - - let home = Sched(main_sched.make_handle()); - let mut main_task = ~Task::new_root_homed(&mut main_sched.stack_pool, - None, - home, - ::util::replace(&mut main, - None). - unwrap()); - main_task.name = Some(SendStrStatic("
")); - main_task.death.on_exit = ::util::replace(&mut on_exit, None); - rtdebug!("bootstrapping main_task"); - - main_sched.bootstrap(main_task); - } - - rtdebug!("waiting for threads"); - - // Wait for schedulers - for thread in threads.move_iter() { - thread.join(); - } - - // Return the exit code - unsafe { - (*exit_code.get()).load(SeqCst) - } -} - -pub fn in_sched_context() -> bool { - unsafe { - let task_ptr: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_ptr { - Some(task) => { - match (*task).task_type { - SchedTask => true, - _ => false - } - } - None => false - } - } -} - -pub fn in_green_task_context() -> bool { - unsafe { - let task: Option<*mut Task> = Local::try_unsafe_borrow(); - match task { - Some(task) => { - match (*task).task_type { - GreenTask(_) => true, - _ => false - } - } - None => false - } - } -} - -pub fn new_event_loop() -> ~rtio::EventLoop { - match crate_map::get_crate_map() { - None => {} - Some(map) => { - match map.event_loop_factory { - None => {} - Some(factory) => return factory() - } - } - } - - // If the crate map didn't specify a factory to create an event loop, then - // instead just use a basic event loop missing all I/O services to at least - // get the scheduler running. - return basic::event_loop(); -} diff --git a/src/libstd/rt/rc.rs b/src/libstd/rt/rc.rs deleted file mode 100644 index 2699dab6d38a8..0000000000000 --- a/src/libstd/rt/rc.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! An owned, task-local, reference counted type -//! -//! # Safety note -//! -//! XXX There is currently no type-system mechanism for enforcing that -//! reference counted types are both allocated on the exchange heap -//! and also non-sendable -//! -//! 
-
-use ops::Drop;
-use clone::Clone;
-use libc::c_void;
-use cast;
-
-pub struct RC<T> {
-    priv p: *c_void // ~(uint, T)
-}
-
-impl<T> RC<T> {
-    pub fn new(val: T) -> RC<T> {
-        unsafe {
-            let v = ~(1, val);
-            let p: *c_void = cast::transmute(v);
-            RC { p: p }
-        }
-    }
-
-    fn get_mut_state(&mut self) -> *mut (uint, T) {
-        unsafe {
-            let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
-            let p: *mut (uint, T) = &mut **p;
-            return p;
-        }
-    }
-
-    fn get_state(&self) -> *(uint, T) {
-        unsafe {
-            let p: &~(uint, T) = cast::transmute(&self.p);
-            let p: *(uint, T) = &**p;
-            return p;
-        }
-    }
-
-    pub fn unsafe_borrow_mut(&mut self) -> *mut T {
-        unsafe {
-            match *self.get_mut_state() {
-                (_, ref mut p) => {
-                    let p: *mut T = p;
-                    return p;
-                }
-            }
-        }
-    }
-
-    pub fn refcount(&self) -> uint {
-        unsafe {
-            match *self.get_state() {
-                (count, _) => count
-            }
-        }
-    }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for RC<T> {
-    fn drop(&mut self) {
-        assert!(self.refcount() > 0);
-
-        unsafe {
-            match *self.get_mut_state() {
-                (ref mut count, _) => {
-                    *count = *count - 1
-                }
-            }
-
-            if self.refcount() == 0 {
-                let _: ~(uint, T) = cast::transmute(self.p);
-            }
-        }
-    }
-}
-
-impl<T> Clone for RC<T> {
-    fn clone(&self) -> RC<T> {
-        unsafe {
-            // XXX: Mutable clone
-            let this: &mut RC<T> = cast::transmute_mut(self);
-
-            match *this.get_mut_state() {
-                (ref mut count, _) => {
-                    *count = *count + 1;
-                }
-            }
-        }
-
-        RC { p: self.p }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::RC;
-
-    #[test]
-    fn smoke_test() {
-        unsafe {
-            let mut v1 = RC::new(100);
-            assert!(*v1.unsafe_borrow_mut() == 100);
-            assert!(v1.refcount() == 1);
-
-            let mut v2 = v1.clone();
-            assert!(*v2.unsafe_borrow_mut() == 100);
-            assert!(v2.refcount() == 2);
-
-            *v2.unsafe_borrow_mut() = 200;
-            assert!(*v2.unsafe_borrow_mut() == 200);
-            assert!(*v1.unsafe_borrow_mut() == 200);
-
-            let v3 = v2.clone();
-            assert!(v3.refcount() == 3);
-            {
-                let _v1 = v1;
-                let _v2 = v2;
-            }
-            assert!(v3.refcount() == 1);
-        }
-    }
-}
diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs
index b54231421e396..6b3d50a76ac8f 100644
--- a/src/libstd/rt/rtio.rs
+++ b/src/libstd/rt/rtio.rs
@@ -14,14 +14,15 @@ use comm::{SharedChan, Port};
 use libc::c_int;
 use libc;
 use ops::Drop;
-use option::*;
+use option::{Option, Some, None};
 use path::Path;
-use result::*;
+use result::{Result, Ok, Err};
+use rt::task::Task;
+use rt::local::Local;
 
 use ai = io::net::addrinfo;
+use io;
 use io::IoError;
-use io::native::NATIVE_IO_FACTORY;
-use io::native;
 use io::net::ip::{IpAddr, SocketAddr};
 use io::process::{ProcessConfig, ProcessExit};
 use io::signal::Signum;
@@ -93,36 +94,52 @@ impl<'a> Drop for LocalIo<'a> {
 impl<'a> LocalIo<'a> {
     /// Returns the local I/O: either the local scheduler's I/O services or
     /// the native I/O services.
-    pub fn borrow() -> LocalIo {
-        use rt::sched::Scheduler;
-        use rt::local::Local;
+    pub fn borrow() -> Option<LocalIo> {
+        // FIXME(#11053): bad
+        //
+        // This is currently very unsafely implemented. We don't actually
+        // *take* the local I/O so there's a very real possibility that we
+        // can have two borrows at once. Currently there is not a clear way
+        // to actually borrow the local I/O factory safely because even if
+        // ownership were transferred down to the functions that the I/O
+        // factory implements it's just too much of a pain to know when to
+        // relinquish ownership back into the local task (but that would be
+        // the safe way of implementing this function).
+        //
+        // In order to get around this, we just transmute a copy out of the task
+        // in order to have what is likely a static lifetime (bad).
+        let mut t: ~Task = Local::take();
+        let ret = t.local_io().map(|t| {
+            unsafe { cast::transmute_copy(&t) }
+        });
+        Local::put(t);
+        return ret;
+    }
 
-        unsafe {
-            // First, attempt to use the local scheduler's I/O services
-            let sched: Option<*mut Scheduler> = Local::try_unsafe_borrow();
-            match sched {
-                Some(sched) => {
-                    match (*sched).event_loop.io() {
-                        Some(factory) => {
-                            return LocalIo {
-                                factory: factory,
-                            }
-                        }
-                        None => {}
+    pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>)
+        -> Option<T>
+    {
+        match LocalIo::borrow() {
+            None => {
+                io::io_error::cond.raise(io::standard_error(io::IoUnavailable));
+                None
+            }
+            Some(mut io) => {
+                match f(io.get()) {
+                    Ok(t) => Some(t),
+                    Err(ioerr) => {
+                        io::io_error::cond.raise(ioerr);
+                        None
                     }
                 }
-                None => {}
-            }
-            // If we don't have a scheduler or the scheduler doesn't have I/O
-            // services, then fall back to the native I/O services.
-            let native_io: &'static mut native::IoFactory =
-                &mut NATIVE_IO_FACTORY;
-            LocalIo {
-                factory: native_io as &mut IoFactory:'static
             }
         }
     }
 
+    pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
+        LocalIo { factory: io }
+    }
+
     /// Returns the underlying I/O factory as a trait reference.
     #[inline]
     pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
index 30e05e9091f3e..e6ab159a76952 100644
--- a/src/libstd/rt/task.rs
+++ b/src/libstd/rt/task.rs
@@ -13,29 +13,41 @@
 //! local storage, and logging. Even a 'freestanding' Rust would likely want
 //! to implement this.
 
-use super::local_heap::LocalHeap;
-
-use prelude::*;
-
+use any::AnyOwnExt;
 use borrow;
+use cast;
 use cleanup;
 use io::Writer;
-use libc::{c_char, size_t};
+use iter::{Iterator, Take};
 use local_data;
+use ops::Drop;
 use option::{Option, Some, None};
+use prelude::drop;
+use result::{Result, Ok, Err};
+use rt::Runtime;
 use rt::borrowck::BorrowRecord;
 use rt::borrowck;
-use rt::context::Context;
-use rt::env;
-use rt::kill::Death;
 use rt::local::Local;
+use rt::local_heap::LocalHeap;
 use rt::logging::StdErrLogger;
-use rt::sched::{Scheduler, SchedHandle};
-use rt::stack::{StackSegment, StackPool};
+use rt::rtio::LocalIo;
 use rt::unwind::Unwinder;
 use send_str::SendStr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+use task::{TaskResult, TaskOpts};
 use unstable::finally::Finally;
-use unstable::mutex::Mutex;
+use unstable::mutex::{Mutex, MUTEX_INIT};
+
+#[cfg(stage0)]
+pub use rt::unwind::begin_unwind;
+
+// These two statics are used as bookkeeping to keep track of the rust runtime's
+// count of threads. In 1:1 contexts, this is used to know when to return from
+// the main function, and in M:N contexts this is used to know when to shut down
+// the pool of schedulers.
+static mut TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
+static mut TASK_LOCK: Mutex = MUTEX_INIT;
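The `LocalIo::maybe_raise` helper above bridges the `Result`-based `IoFactory` methods and the condition-raising `std::io` surface: on a runtime without I/O it raises `io_error` with `IoUnavailable` and yields `None`. A hedged sketch of a caller (the factory call is faked with a closure returning `Ok`; real callers live throughout `std::io`):

```
// sketch only: inside libstd, where these paths resolve
use rt::rtio::LocalIo;
use io::IoError;
use result::{Result, Ok};
use option::Option;

fn sample_io_op() -> Option<uint> {
    LocalIo::maybe_raise(|_io| {
        // a real caller would invoke an IoFactory method here; we fake
        // a successful Result<T, IoError> instead
        let r: Result<uint, IoError> = Ok(5u);
        r
    })
}
```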
 
 // The Task struct represents all state associated with a rust
 // task. There are at this point two primary "subtypes" of task,
@@ -45,201 +57,90 @@ use unstable::mutex::Mutex;
 pub struct Task {
     heap: LocalHeap,
-    priv gc: GarbageCollector,
+    gc: GarbageCollector,
     storage: LocalStorage,
-    logger: Option<StdErrLogger>,
     unwinder: Unwinder,
     death: Death,
     destroyed: bool,
     name: Option<SendStr>,
-    coroutine: Option<Coroutine>,
-    sched: Option<~Scheduler>,
-    task_type: TaskType,
     // Dynamic borrowck debugging info
     borrow_list: Option<~[BorrowRecord]>,
+
+    logger: Option<StdErrLogger>,
     stdout_handle: Option<~Writer>,
 
-    // See the comments in the scheduler about why this is necessary
-    nasty_deschedule_lock: Mutex,
+    priv imp: Option<~Runtime>,
 }
 
-pub enum TaskType {
-    GreenTask(Option<SchedHome>),
-    SchedTask
-}
+pub struct GarbageCollector;
+pub struct LocalStorage(Option<local_data::Map>);
 
-/// A coroutine is nothing more than a (register context, stack) pair.
-pub struct Coroutine {
-    /// The segment of stack on which the task is currently running or
-    /// if the task is blocked, on which the task will resume
-    /// execution.
-    ///
-    /// Servo needs this to be public in order to tell SpiderMonkey
-    /// about the stack bounds.
-    current_stack_segment: StackSegment,
-    /// Always valid if the task is alive and not running.
-    saved_context: Context
+/// A handle to a blocked task. Usually this means having the ~Task pointer by
+/// ownership, but if the task is killable, a killer can steal it at any time.
+pub enum BlockedTask {
+    Owned(~Task),
+    Shared(UnsafeArc<AtomicUint>),
 }
 
-/// Some tasks have a dedicated home scheduler that they must run on.
-pub enum SchedHome {
-    AnySched,
-    Sched(SchedHandle)
+/// Per-task state related to task death, killing, failure, etc.
+pub struct Death {
+    // Action to be done with the exit code. If set, also makes the task wait
+    // until all its watched children exit before collecting the status.
+    on_exit: Option<proc(TaskResult)>,
 }
 
-pub struct GarbageCollector;
-pub struct LocalStorage(Option<local_data::Map>);
+pub struct BlockedTaskIterator {
+    priv inner: UnsafeArc<AtomicUint>,
+}
 
 impl Task {
-
-    // A helper to build a new task using the dynamically found
-    // scheduler and task. Only works in GreenTask context.
-    pub fn build_homed_child(stack_size: Option<uint>,
-                             f: proc(),
-                             home: SchedHome)
-                             -> ~Task {
-        let mut running_task = Local::borrow(None::<Task>);
-        let mut sched = running_task.get().sched.take_unwrap();
-        let new_task = ~running_task.get()
-                                    .new_child_homed(&mut sched.stack_pool,
-                                                     stack_size,
-                                                     home,
-                                                     f);
-        running_task.get().sched = Some(sched);
-        new_task
-    }
-
-    pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
-        Task::build_homed_child(stack_size, f, AnySched)
-    }
-
-    pub fn build_homed_root(stack_size: Option<uint>,
-                            f: proc(),
-                            home: SchedHome)
-                            -> ~Task {
-        let mut running_task = Local::borrow(None::<Task>);
-        let mut sched = running_task.get().sched.take_unwrap();
-        let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
-                                             stack_size,
-                                             home,
-                                             f);
-        running_task.get().sched = Some(sched);
-        new_task
-    }
-
-    pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
-        Task::build_homed_root(stack_size, f, AnySched)
-    }
-
-    pub fn new_sched_task() -> Task {
+    pub fn new() -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(None),
-            logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
+            unwinder: Unwinder::new(),
             death: Death::new(),
             destroyed: false,
-            coroutine: Some(Coroutine::empty()),
             name: None,
-            sched: None,
-            task_type: SchedTask,
             borrow_list: None,
-            stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
-        }
-    }
-
-    pub fn new_root(stack_pool: &mut StackPool,
-                    stack_size: Option<uint>,
-                    start: proc()) -> Task {
-        Task::new_root_homed(stack_pool, stack_size, AnySched, start)
-    }
-
-    pub fn new_child(&mut self,
-                     stack_pool: &mut StackPool,
-                     stack_size: Option<uint>,
-                     start: proc()) -> Task {
-        self.new_child_homed(stack_pool, stack_size, AnySched, start)
-    }
-
-    pub fn new_root_homed(stack_pool: &mut StackPool,
-                          stack_size: Option<uint>,
-                          home: SchedHome,
-                          start: proc()) -> Task {
-        Task {
-            heap: LocalHeap::new(),
-            gc: GarbageCollector,
-            storage: LocalStorage(None),
             logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
-            death: Death::new(),
-            destroyed: false,
-            name: None,
-            coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
-            sched: None,
-            task_type: GreenTask(Some(home)),
-            borrow_list: None,
             stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
+            imp: None,
         }
     }
 
-    pub fn new_child_homed(&mut self,
-                           stack_pool: &mut StackPool,
-                           stack_size: Option<uint>,
-                           home: SchedHome,
-                           start: proc()) -> Task {
-        Task {
-            heap: LocalHeap::new(),
-            gc: GarbageCollector,
-            storage: LocalStorage(None),
-            logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
-            death: Death::new(),
-            destroyed: false,
-            name: None,
-            coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
-            sched: None,
-            task_type: GreenTask(Some(home)),
-            borrow_list: None,
-            stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
-        }
-    }
-
-    pub fn give_home(&mut self, new_home: SchedHome) {
-        match self.task_type {
-            GreenTask(ref mut home) => {
-                *home = Some(new_home);
-            }
-            SchedTask => {
-                rtabort!("type error: used SchedTask as GreenTask");
-            }
-        }
-    }
-
-    pub fn take_unwrap_home(&mut self) -> SchedHome {
-        match self.task_type {
-            GreenTask(ref mut home) => {
-                let out = home.take_unwrap();
-                return out;
-            }
-            SchedTask => {
-                rtabort!("type error: used SchedTask as GreenTask");
-            }
-        }
-    }
-
-    pub fn run(&mut self, f: ||) {
-        rtdebug!("run called on task: {}", borrow::to_uint(self));
+    /// Executes the given closure as if it's running inside this task. The task
+    /// is consumed upon entry, and the destroyed task is returned from this
+    /// function in order for the caller to free. This function is guaranteed to
+    /// not unwind because the closure specified is run inside of a `rust_try`
+    /// block. (this is the only try/catch block in the world).
+    ///
+    /// This function is *not* meant to be abused as a "try/catch" block. This
+    /// is meant to be used at the absolute boundaries of a task's lifetime, and
+    /// only for that purpose.
+    pub fn run(~self, f: ||) -> ~Task {
+        // Need to put ourselves into TLS, but also need access to the unwinder.
+        // Unsafely get a handle to the task so we can continue to use it after
+        // putting it in tls (so we can invoke the unwinder).
+        let handle: *mut Task = unsafe {
+            *cast::transmute::<&~Task, &*mut Task>(&self)
+        };
+        Local::put(self);
+        unsafe { TASK_COUNT.fetch_add(1, SeqCst); }
 
         // The only try/catch block in the world. Attempt to run the task's
         // client-specified code and catch any failures.
-        self.unwinder.try(|| {
+        let try_block = || {
 
             // Run the task main function, then do some cleanup.
             f.finally(|| {
+                fn flush(w: Option<~Writer>) {
+                    match w {
+                        Some(mut w) => { w.flush(); }
+                        None => {}
+                    }
+                }
 
                 // First, destroy task-local storage. This may run user dtors.
                 //
@@ -260,7 +161,10 @@ impl Task {
                 // TLS, or possibly some destructors for those objects being
                 // annihilated invoke TLS. Sadly these two operations seemed to
                 // be intertwined, and miraculously work for now...
-                self.storage.take();
+                let mut task = Local::borrow(None::<Task>);
+                let storage = task.get().storage.take();
+                drop(task);
+                drop(storage);
 
                 // Destroy remaining boxes. Also may run user dtors.
                 unsafe { cleanup::annihilate(); }
@@ -268,77 +172,141 @@ impl Task {
                 // Finally flush and destroy any output handles which the task
                 // owns. There are no boxes here, and no user destructors should
                 // run after this any more.
-                match self.stdout_handle.take() {
-                    Some(handle) => {
-                        let mut handle = handle;
-                        handle.flush();
-                    }
-                    None => {}
-                }
-                self.logger.take();
+                let mut task = Local::borrow(None::<Task>);
+                let stdout = task.get().stdout_handle.take();
+                let logger = task.get().logger.take();
+                drop(task);
+
+                flush(stdout);
+                drop(logger);
             })
-        });
+        };
+
+        unsafe { (*handle).unwinder.try(try_block); }
 
         // Cleanup the dynamic borrowck debugging info
         borrowck::clear_task_borrow_list();
 
-        self.death.collect_failure(self.unwinder.result());
-        self.destroyed = true;
+        // Here we must unsafely borrow the task in order to not remove it from
+        // TLS. When collecting failure, we may attempt to send on a channel (or
+        // just run arbitrary code), so we must be sure to still have a local
+        // task in TLS.
+        unsafe {
+            let me: *mut Task = Local::unsafe_borrow();
+            (*me).death.collect_failure((*me).unwinder.result());
+
+            // see comments on these statics for why they're used
+            if TASK_COUNT.fetch_sub(1, SeqCst) == 1 {
+                TASK_LOCK.lock();
+                TASK_LOCK.signal();
+                TASK_LOCK.unlock();
+            }
+        }
+        let mut me: ~Task = Local::take();
+        me.destroyed = true;
+        return me;
     }
 
-    // New utility functions for homes.
+    /// Inserts a runtime object into this task, transferring ownership to the
+    /// task. It is illegal to replace a previous runtime object in this task
+    /// with this argument.
+    pub fn put_runtime(&mut self, ops: ~Runtime) {
+        assert!(self.imp.is_none());
+        self.imp = Some(ops);
+    }
 
-    pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
-        match self.task_type {
-            GreenTask(Some(AnySched)) => { false }
-            GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => {
-                *id == sched.sched_id()
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                // Awe yea
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
+    /// Attempts to extract the runtime as a specific type. If the runtime does
+    /// not have the provided type, then the runtime is not removed. If the
+    /// runtime does have the specified type, then it is removed and returned
+    /// (transfer of ownership).
+    ///
+    /// It is recommended to only use this method when *absolutely necessary*.
+    /// This function may not be available in the future.
+    pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<~T> {
+        // This is a terrible, terrible function. The general idea here is to
+        // take the runtime, cast it to ~Any, check if it has the right type,
+        // and then re-cast it back if necessary. The method of doing this is
+        // pretty sketchy and involves shuffling vtables of trait objects
+        // around, but it gets the job done.
+        //
+        // XXX: This function is a serious code smell and should be avoided at
+        //      all costs. I have yet to think of a method to avoid this
+        //      function, and I would be saddened if more usage of the function
+        //      crops up.
+        unsafe {
+            let imp = self.imp.take_unwrap();
+            let &(vtable, _): &(uint, uint) = cast::transmute(&imp);
+            match imp.wrap().move::<T>() {
+                Ok(t) => Some(t),
+                Err(t) => {
+                    let (_, obj): (uint, uint) = cast::transmute(t);
+                    let obj: ~Runtime = cast::transmute((vtable, obj));
+                    self.put_runtime(obj);
+                    None
+                }
             }
         }
     }
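`maybe_take_runtime` is a hand-rolled version of the `~Any` move-downcast it is built on, with extra vtable bookkeeping so the object can be restored on a type mismatch. The underlying `Any` operation alone looks like this (minimal sketch using this era's `AnyOwnExt::move`, not part of the patch):

```
fn downcast_demo() {
    use any::{Any, AnyOwnExt};

    let x = ~5u as ~Any;
    match x.move::<uint>() {
        // on success, ownership of the boxed value is transferred out
        Ok(n) => assert!(*n == 5u),
        Err(_) => fail!("wrong type"),
    }
}
```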
 
-    pub fn homed(&self) -> bool {
-        match self.task_type {
-            GreenTask(Some(AnySched)) => { false }
-            GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
-        }
+    /// Spawns a sibling to this task. The newly spawned task is configured with
+    /// the `opts` structure and will run `f` as the body of its code.
+    pub fn spawn_sibling(mut ~self, opts: TaskOpts, f: proc()) {
+        let ops = self.imp.take_unwrap();
+        ops.spawn_sibling(self, opts, f)
     }
 
-    // Grab both the scheduler and the task from TLS and check if the
-    // task is executing on an appropriate scheduler.
-    pub fn on_appropriate_sched() -> bool {
-        let mut task = Local::borrow(None::<Task>);
-        let sched_id = task.get().sched.get_ref().sched_id();
-        let sched_run_anything = task.get().sched.get_ref().run_anything;
-        match task.get().task_type {
-            GreenTask(Some(AnySched)) => {
-                rtdebug!("anysched task in sched check ****");
-                sched_run_anything
-            }
-            GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
-                rtdebug!("homed task in sched check ****");
-                *id == sched_id
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
+    /// Deschedules the current task, invoking `f` `amt` times. It is not
+    /// recommended to use this function directly, but rather communication
+    /// primitives in `std::comm` should be used.
+    pub fn deschedule(mut ~self, amt: uint,
+                      f: |BlockedTask| -> Result<(), BlockedTask>) {
+        let ops = self.imp.take_unwrap();
+        ops.deschedule(amt, self, f)
+    }
+
+    /// Wakes up a previously blocked task, optionally specifying whether the
+    /// current task can accept a change in scheduling. This function can only
+    /// be called on tasks that were previously blocked in `deschedule`.
+    pub fn reawaken(mut ~self, can_resched: bool) {
+        let ops = self.imp.take_unwrap();
+        ops.reawaken(self, can_resched);
+    }
+
+    /// Yields control of this task to another task. This function will
+    /// eventually return, but possibly not immediately. This is used as an
+    /// opportunity to allow other tasks a chance to run.
+    pub fn yield_now(mut ~self) {
+        let ops = self.imp.take_unwrap();
+        ops.yield_now(self);
+    }
+
+    /// Similar to `yield_now`, except that this function may immediately return
+    /// without yielding (depending on what the runtime decides to do).
+    pub fn maybe_yield(mut ~self) {
+        let ops = self.imp.take_unwrap();
+        ops.maybe_yield(self);
+    }
+
+    /// Acquires a handle to the I/O factory that this task contains, normally
+    /// stored in the task's runtime. This factory may not always be available,
+    /// which is why the return type is `Option`
+    pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
+        self.imp.get_mut_ref().local_io()
+    }
+
+    /// The main function of all rust executables will by default use this
+    /// function. This function will *block* the OS thread (hence the `unsafe`)
+    /// waiting for all known tasks to complete. Once this function has
+    /// returned, it is guaranteed that no more user-defined code is still
+    /// running.
+    pub unsafe fn wait_for_other_tasks(&mut self) {
+        TASK_COUNT.fetch_sub(1, SeqCst); // don't count ourselves
+        TASK_LOCK.lock();
+        while TASK_COUNT.load(SeqCst) > 0 {
+            TASK_LOCK.wait();
         }
+        TASK_LOCK.unlock();
+        TASK_COUNT.fetch_add(1, SeqCst); // add ourselves back in
     }
 }
@@ -346,348 +314,192 @@ impl Drop for Task {
     fn drop(&mut self) {
         rtdebug!("called drop for a task: {}", borrow::to_uint(self));
         rtassert!(self.destroyed);
-
-        unsafe { self.nasty_deschedule_lock.destroy(); }
     }
 }
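The `TASK_COUNT`/`TASK_LOCK` pair used by `run` and `wait_for_other_tasks` above is a simple counted-wait protocol: every task increments on entry and decrements on exit, and the task that drops the count to zero signals the waiter. A standalone sketch of the same protocol (separate statics, module paths as used in this patch inside libstd; not the real statics):

```
use sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
use unstable::mutex::{Mutex, MUTEX_INIT};

static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
static mut LOCK: Mutex = MUTEX_INIT;

unsafe fn enter() { COUNT.fetch_add(1, SeqCst); }

unsafe fn exit() {
    // the task that brings the count to zero wakes the waiter
    if COUNT.fetch_sub(1, SeqCst) == 1 {
        LOCK.lock(); LOCK.signal(); LOCK.unlock();
    }
}

unsafe fn wait_until_idle() {
    LOCK.lock();
    while COUNT.load(SeqCst) > 0 { LOCK.wait(); }
    LOCK.unlock();
}
```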
 
-// Coroutines represent nothing more than a context and a stack
-// segment.
-
-impl Coroutine {
-
-    pub fn new(stack_pool: &mut StackPool,
-               stack_size: Option<uint>,
-               start: proc())
-               -> Coroutine {
-        let stack_size = match stack_size {
-            Some(size) => size,
-            None => env::min_stack()
-        };
-        let start = Coroutine::build_start_wrapper(start);
-        let mut stack = stack_pool.take_segment(stack_size);
-        let initial_context = Context::new(start, &mut stack);
-        Coroutine {
-            current_stack_segment: stack,
-            saved_context: initial_context
-        }
+impl Iterator<BlockedTask> for BlockedTaskIterator {
+    fn next(&mut self) -> Option<BlockedTask> {
+        Some(Shared(self.inner.clone()))
     }
+}
 
-    pub fn empty() -> Coroutine {
-        Coroutine {
-            current_stack_segment: StackSegment::new(0),
-            saved_context: Context::empty()
+impl BlockedTask {
+    /// Returns Some if the task was successfully woken; None if already killed.
+    pub fn wake(self) -> Option<~Task> {
+        match self {
+            Owned(task) => Some(task),
+            Shared(arc) => unsafe {
+                match (*arc.get()).swap(0, SeqCst) {
+                    0 => None,
+                    n => Some(cast::transmute(n)),
+                }
+            }
         }
     }
 
-    fn build_start_wrapper(start: proc()) -> proc() {
-        let wrapper: proc() = proc() {
-            // First code after swap to this new context. Run our
-            // cleanup job.
-            unsafe {
+    // This assertion has two flavours because the wake involves an atomic op.
+    // In the faster version, destructors will fail dramatically instead.
+    #[cfg(not(test))] pub fn trash(self) { }
+    #[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
 
-                // Again - might work while safe, or it might not.
-                {
-                    let mut sched = Local::borrow(None::<Scheduler>);
-                    sched.get().run_cleanup_job();
-                }
+    /// Create a blocked task, unless the task was already killed.
+    pub fn block(task: ~Task) -> BlockedTask {
+        Owned(task)
+    }
 
-                // To call the run method on a task we need a direct
-                // reference to it. The task is in TLS, so we can
-                // simply unsafe_borrow it to get this reference. We
-                // need to still have the task in TLS though, so we
-                // need to unsafe_borrow.
-                let task: *mut Task = Local::unsafe_borrow();
-
-                let mut start_cell = Some(start);
-                (*task).run(|| {
-                    // N.B. Removing `start` from the start wrapper
-                    // closure by emptying a cell is critical for
-                    // correctness. The ~Task pointer, and in turn the
-                    // closure used to initialize the first call
-                    // frame, is destroyed in the scheduler context,
-                    // not task context. So any captured closures must
-                    // not contain user-definable dtors that expect to
-                    // be in task context. By moving `start` out of
-                    // the closure, all the user code goes out of
-                    // scope while the task is still running.
-                    let start = start_cell.take_unwrap();
-                    start();
-                });
+    /// Converts one blocked task handle to a list of many handles to the same.
+    pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTaskIterator>
+    {
+        let arc = match self {
+            Owned(task) => {
+                let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
+                UnsafeArc::new(flag)
             }
-
-            // We remove the sched from the Task in TLS right now.
-            let sched: ~Scheduler = Local::take();
-            // ... allowing us to give it away when performing a
-            // scheduling operation.
-            sched.terminate_current_task()
+            Shared(arc) => arc.clone(),
         };
-        return wrapper;
+        BlockedTaskIterator{ inner: arc }.take(num_handles)
     }
 
-    /// Destroy coroutine and try to reuse stack segment.
-    pub fn recycle(self, stack_pool: &mut StackPool) {
+    /// Convert to an unsafe uint value. Useful for storing in a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_to_uint(self) -> uint {
         match self {
-            Coroutine { current_stack_segment, .. } => {
-                stack_pool.give_segment(current_stack_segment);
+            Owned(task) => {
+                let blocked_task_ptr: uint = cast::transmute(task);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = cast::transmute(~arc);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr | 0x1
             }
         }
     }
-}
-
-/// This function is invoked from rust's current __morestack function. Segmented
-/// stacks are currently not enabled as segmented stacks, but rather one giant
-/// stack segment. This means that whenever we run out of stack, we want to
-/// truly consider it to be stack overflow rather than allocating a new stack.
-#[no_mangle]      // - this is called from C code
-#[no_split_stack] // - it would be sad for this function to trigger __morestack
-#[doc(hidden)]    // - Function must be `pub` to get exported, but it's
-                  //   irrelevant for documentation purposes.
-#[cfg(not(test))] // in testing, use the original libstd's version
-pub extern "C" fn rust_stack_exhausted() {
-    use rt::context;
-    use rt::in_green_task_context;
-    use rt::task::Task;
-    use rt::local::Local;
-    use unstable::intrinsics;
-
-    unsafe {
-        // We're calling this function because the stack just ran out. We need
-        // to call some other rust functions, but if we invoke the functions
-        // right now it'll just trigger this handler being called again. In
-        // order to alleviate this, we move the stack limit to be inside of the
-        // red zone that was allocated for exactly this reason.
-        let limit = context::get_sp_limit();
-        context::record_sp_limit(limit - context::RED_ZONE / 2);
-
-        // This probably isn't the best course of action. Ideally one would want
-        // to unwind the stack here instead of just aborting the entire process.
-        // This is a tricky problem, however. There's a few things which need to
-        // be considered:
-        //
-        //  1. We're here because of a stack overflow, yet unwinding will run
-        //     destructors and hence arbitrary code. What if that code overflows
-        //     the stack? One possibility is to use the above allocation of an
-        //     extra 10k to hope that we don't hit the limit, and if we do then
-        //     abort the whole program. Not the best, but kind of hard to deal
-        //     with unless we want to switch stacks.
-        //
-        //  2. LLVM will optimize functions based on whether they can unwind or
-        //     not. It will flag functions with 'nounwind' if it believes that
-        //     the function cannot trigger unwinding, but if we do unwind on
-        //     stack overflow then it means that we could unwind in any function
-        //     anywhere. We would have to make sure that LLVM only places the
-        //     nounwind flag on functions which don't call any other functions.
-        //
-        //  3. The function that overflowed may have owned arguments. These
-        //     arguments need to have their destructors run, but we haven't even
-        //     begun executing the function yet, so unwinding will not run the
-        //     any landing pads for these functions. If this is ignored, then
-        //     the arguments will just be leaked.
-        //
-        // Exactly what to do here is a very delicate topic, and is possibly
-        // still up in the air for what exactly to do. Some relevant issues:
-        //
-        //  #3555 - out-of-stack failure leaks arguments
-        //  #3695 - should there be a stack limit?
-        //  #9855 - possible strategies which could be taken
-        //  #9854 - unwinding on windows through __morestack has never worked
-        //  #2361 - possible implementation of not using landing pads
-
-        if in_green_task_context() {
-            let mut task = Local::borrow(None::<Task>);
-            let n = task.get()
-                        .name
-                        .as_ref()
-                        .map(|n| n.as_slice())
-                        .unwrap_or("<unnamed>");
-
-            // See the message below for why this is not emitted to the
-            // task's logger. This has the additional conundrum of the
-            // logger may not be initialized just yet, meaning that an FFI
-            // call would happen to initialized it (calling out to libuv),
-            // and the FFI call needs 2MB of stack when we just ran out.
-            rterrln!("task '{}' has overflowed its stack", n);
+    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
+        if blocked_task_ptr & 0x1 == 0 {
+            Owned(cast::transmute(blocked_task_ptr))
         } else {
-            rterrln!("stack overflow in non-task context");
+            let ptr: ~UnsafeArc<AtomicUint> =
+                cast::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
         }
-
-        intrinsics::abort();
     }
 }
 
-/// This is the entry point of unwinding for things like lang items and such.
-/// The arguments are normally generated by the compiler, and need to
-/// have static lifetimes.
-pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
-    use c_str::CString;
-    use cast::transmute;
-
-    #[inline]
-    fn static_char_ptr(p: *c_char) -> &'static str {
-        let s = unsafe { CString::new(p, false) };
-        match s.as_str() {
-            Some(s) => unsafe { transmute::<&str, &'static str>(s) },
-            None => rtabort!("message wasn't utf8?")
-        }
-    }
-
-    let msg = static_char_ptr(msg);
-    let file = static_char_ptr(file);
-
-    begin_unwind(msg, file, line as uint)
-}
-
-/// This is the entry point of unwinding for fail!() and assert!().
-pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
-    use any::AnyRefExt;
-    use rt::in_green_task_context;
-    use rt::local::Local;
-    use rt::task::Task;
-    use str::Str;
-    use unstable::intrinsics;
-
-    unsafe {
-        let task: *mut Task;
-        // Note that this should be the only allocation performed in this block.
-        // Currently this means that fail!() on OOM will invoke this code path,
-        // but then again we're not really ready for failing on OOM anyway. If
-        // we do start doing this, then we should propagate this allocation to
-        // be performed in the parent of this task instead of the task that's
-        // failing.
-        let msg = ~msg as ~Any;
-
-        {
-            //let msg: &Any = msg;
-            let msg_s = match msg.as_ref::<&'static str>() {
-                Some(s) => *s,
-                None => match msg.as_ref::<~str>() {
-                    Some(s) => s.as_slice(),
-                    None => "~Any",
-                }
-            };
-
-            if !in_green_task_context() {
-                rterrln!("failed in non-task context at '{}', {}:{}",
-                         msg_s, file, line);
-                intrinsics::abort();
-            }
-
-            task = Local::unsafe_borrow();
-            let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
-
-            // XXX: this should not get forcibly printed to the console, this should
-            //      either be sent to the parent task (ideally), or get printed to
-            //      the task's logger. Right now the logger is actually a uvio
-            //      instance, which uses unkillable blocks internally for various
-            //      reasons. This will cause serious trouble if the task is failing
-            //      due to mismanagement of its own kill flag, so calling our own
-            //      logger in its current state is a bit of a problem.
-
-            rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
-
-            if (*task).unwinder.unwinding {
-                rtabort!("unwinding again");
-            }
-        }
-
-        (*task).unwinder.begin_unwind(msg);
-    }
-}
+impl Death {
+    pub fn new() -> Death {
+        Death { on_exit: None, }
+    }
 
+    /// Collect failure exit codes from children and propagate them to a parent.
+    pub fn collect_failure(&mut self, result: TaskResult) {
+        match self.on_exit.take() {
+            Some(f) => f(result),
+            None => {}
+        }
+    }
+}
 
+impl Drop for Death {
+    fn drop(&mut self) {
+        // make this type noncopyable
+    }
+}
 
 #[cfg(test)]
 mod test {
     use super::*;
-    use rt::test::*;
     use prelude::*;
+    use task;
 
     #[test]
     fn local_heap() {
-        do run_in_newsched_task() {
-            let a = @5;
-            let b = a;
-            assert!(*a == 5);
-            assert!(*b == 5);
-        }
+        let a = @5;
+        let b = a;
+        assert!(*a == 5);
+        assert!(*b == 5);
     }
 
     #[test]
     fn tls() {
         use local_data;
-        do run_in_newsched_task() {
-            local_data_key!(key: @~str)
-            local_data::set(key, @~"data");
-            assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
-            local_data_key!(key2: @~str)
-            local_data::set(key2, @~"data");
-            assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
-        }
+        local_data_key!(key: @~str)
+        local_data::set(key, @~"data");
+        assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
+        local_data_key!(key2: @~str)
+        local_data::set(key2, @~"data");
+        assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
    }
 
     #[test]
     fn unwind() {
-        do run_in_newsched_task() {
-            let result = spawntask_try(proc()());
-            rtdebug!("trying first assert");
-            assert!(result.is_ok());
-            let result = spawntask_try(proc() fail!());
-            rtdebug!("trying second assert");
-            assert!(result.is_err());
-        }
+        let result = task::try(proc()());
+        rtdebug!("trying first assert");
+        assert!(result.is_ok());
+        let result = task::try::<()>(proc() fail!());
+        rtdebug!("trying second assert");
+        assert!(result.is_err());
     }
 
     #[test]
     fn rng() {
-        do run_in_uv_task() {
-            use rand::{rng, Rng};
-            let mut r = rng();
-            let _ = r.next_u32();
-        }
+        use rand::{rng, Rng};
+        let mut r = rng();
+        let _ = r.next_u32();
     }
 
     #[test]
     fn logging() {
-        do run_in_uv_task() {
-            info!("here i am. logging in a newsched task");
-        }
+        info!("here i am. logging in a newsched task");
     }
 
     #[test]
     fn comm_stream() {
-        do run_in_newsched_task() {
-            let (port, chan) = Chan::new();
-            chan.send(10);
-            assert!(port.recv() == 10);
-        }
+        let (port, chan) = Chan::new();
+        chan.send(10);
+        assert!(port.recv() == 10);
     }
 
     #[test]
     fn comm_shared_chan() {
-        do run_in_newsched_task() {
-            let (port, chan) = SharedChan::new();
-            chan.send(10);
-            assert!(port.recv() == 10);
-        }
+        let (port, chan) = SharedChan::new();
+        chan.send(10);
+        assert!(port.recv() == 10);
     }
 
     #[test]
     fn heap_cycles() {
         use option::{Option, Some, None};
 
-        do run_in_newsched_task {
-            struct List {
-                next: Option<@mut List>,
-            }
+        struct List {
+            next: Option<@mut List>,
+        }
 
-            let a = @mut List { next: None };
-            let b = @mut List { next: Some(a) };
+        let a = @mut List { next: None };
+        let b = @mut List { next: Some(a) };
 
-            a.next = Some(b);
-        }
+        a.next = Some(b);
     }
 
     #[test]
     #[should_fail]
-    fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }
+    fn test_begin_unwind() {
+        use rt::unwind::begin_unwind;
+        begin_unwind("cause", file!(), line!())
+    }
+
+    // Task blocking tests
+
+    #[test]
+    fn block_and_wake() {
+        let task = ~Task::new();
+        let mut task = BlockedTask::block(task).wake().unwrap();
+        task.destroyed = true;
+    }
 }
diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs
deleted file mode 100644
index 2b48b396c99e9..0000000000000
--- a/src/libstd/rt/test.rs
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
-
-use clone::Clone;
-use container::Container;
-use iter::{Iterator, range};
-use option::{Some, None};
-use os;
-use path::GenericPath;
-use path::Path;
-use rand::Rng;
-use rand;
-use result::{Result, Ok, Err};
-use rt::basic;
-use rt::deque::BufferPool;
-use comm::Chan;
-use rt::new_event_loop;
-use rt::sched::Scheduler;
-use rt::sleeper_list::SleeperList;
-use rt::task::Task;
-use rt::thread::Thread;
-use task::TaskResult;
-use unstable::{run_in_bare_thread};
-use vec;
-use vec::{OwnedVector, MutableVector, ImmutableVector};
-
-pub fn new_test_uv_sched() -> Scheduler {
-
-    let mut pool = BufferPool::new();
-    let (worker, stealer) = pool.deque();
-
-    let mut sched = Scheduler::new(new_event_loop(),
-                                   worker,
-                                   ~[stealer],
-                                   SleeperList::new());
-
-    // Don't wait for the Shutdown message
-    sched.no_sleep = true;
-    return sched;
-
-}
-
-pub fn new_test_sched() -> Scheduler {
-    let mut pool = BufferPool::new();
-    let (worker, stealer) = pool.deque();
-
-    let mut sched = Scheduler::new(basic::event_loop(),
-                                   worker,
-                                   ~[stealer],
-                                   SleeperList::new());
-
-    // Don't wait for the Shutdown message
-    sched.no_sleep = true;
-    return sched;
-}
-
-pub fn run_in_uv_task(f: proc()) {
-    do run_in_bare_thread {
-        run_in_uv_task_core(f);
-    }
-}
-
-pub fn run_in_newsched_task(f: proc()) {
-    do run_in_bare_thread {
-        run_in_newsched_task_core(f);
-    }
-}
-
-pub fn run_in_uv_task_core(f: proc()) {
-
-    use rt::sched::Shutdown;
-
-    let mut sched = ~new_test_uv_sched();
-    let exit_handle = sched.make_handle();
-
-    let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
-        let mut exit_handle = exit_handle;
-        exit_handle.send(Shutdown);
-        rtassert!(exit_status.is_ok());
-    };
-    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
-    task.death.on_exit = Some(on_exit);
-
-    sched.bootstrap(task);
-}
-
-pub fn run_in_newsched_task_core(f: proc()) {
-    use rt::sched::Shutdown;
-
-    let mut sched = ~new_test_sched();
-    let exit_handle = sched.make_handle();
-
-    let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
-        let mut exit_handle = exit_handle;
-        exit_handle.send(Shutdown);
-        rtassert!(exit_status.is_ok());
-    };
-    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
-    task.death.on_exit = Some(on_exit);
-
-    sched.bootstrap(task);
-}
-
-#[cfg(target_os="macos")]
-#[allow(non_camel_case_types)]
-mod darwin_fd_limit {
-    /*!
-     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
-     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
-     * for our multithreaded scheduler testing, depending on the number of cores available.
-     *
-     * This fixes issue #7772.
- */ - - use libc; - type rlim_t = libc::uint64_t; - struct rlimit { - rlim_cur: rlim_t, - rlim_max: rlim_t - } - #[nolink] - extern { - // name probably doesn't need to be mut, but the C function doesn't specify const - fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint, - oldp: *mut libc::c_void, oldlenp: *mut libc::size_t, - newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int; - fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int; - fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int; - } - static CTL_KERN: libc::c_int = 1; - static KERN_MAXFILESPERPROC: libc::c_int = 29; - static RLIMIT_NOFILE: libc::c_int = 8; - - pub unsafe fn raise_fd_limit() { - // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc - // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value. - use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null}; - use mem::size_of_val; - use os::last_os_error; - - // Fetch the kern.maxfilesperproc value - let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC]; - let mut maxfiles: libc::c_int = 0; - let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; - if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2, - to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void, - to_mut_unsafe_ptr(&mut size), - mut_null(), 0) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling sysctl: {}", err); - return; - } - - // Fetch the current resource limits - let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0}; - if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling getrlimit: {}", err); - return; - } - - // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit - rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max); - - // Set our newly-increased resource limit - if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling setrlimit: {}", err); - return; - } - } -} - -#[cfg(not(target_os="macos"))] -mod darwin_fd_limit { - pub unsafe fn raise_fd_limit() {} -} - -#[doc(hidden)] -pub fn prepare_for_lots_of_tests() { - // Bump the fd limit on OS X. See darwin_fd_limit for an explanation. - unsafe { darwin_fd_limit::raise_fd_limit() } -} - -/// Create more than one scheduler and run a function in a task -/// in one of the schedulers. The schedulers will stay alive -/// until the function `f` returns. -pub fn run_in_mt_newsched_task(f: proc()) { - use os; - use from_str::FromStr; - use rt::sched::Shutdown; - use rt::util; - - // see comment in other function (raising fd limits) - prepare_for_lots_of_tests(); - - do run_in_bare_thread { - let nthreads = match os::getenv("RUST_RT_TEST_THREADS") { - Some(nstr) => FromStr::from_str(nstr).unwrap(), - None => { - if util::limit_thread_creation_due_to_osx_and_valgrind() { - 1 - } else { - // Using more threads than cores in test code - // to force the OS to preempt them frequently. - // Assuming that this help stress test concurrent types. 
- util::num_cpus() * 2 - } - } - }; - - let sleepers = SleeperList::new(); - - let mut handles = ~[]; - let mut scheds = ~[]; - - let mut pool = BufferPool::<~Task>::new(); - let workers = range(0, nthreads).map(|_| pool.deque()); - let (workers, stealers) = vec::unzip(workers); - - for worker in workers.move_iter() { - let loop_ = new_event_loop(); - let mut sched = ~Scheduler::new(loop_, - worker, - stealers.clone(), - sleepers.clone()); - let handle = sched.make_handle(); - - handles.push(handle); - scheds.push(sched); - } - - let handles = handles; // Work around not being able to capture mut - let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) { - // Tell schedulers to exit - let mut handles = handles; - for handle in handles.mut_iter() { - handle.send(Shutdown); - } - - rtassert!(exit_status.is_ok()); - }; - let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, - None, - f); - main_task.death.on_exit = Some(on_exit); - - let mut threads = ~[]; - - let main_thread = { - let sched = scheds.pop(); - let main_task = main_task; - do Thread::start { - sched.bootstrap(main_task); - } - }; - threads.push(main_thread); - - while !scheds.is_empty() { - let mut sched = scheds.pop(); - let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || { - rtdebug!("bootstrapping non-primary scheduler"); - }; - let sched = sched; - let thread = do Thread::start { - sched.bootstrap(bootstrap_task); - }; - - threads.push(thread); - } - - // Wait for schedulers - for thread in threads.move_iter() { - thread.join(); - } - } - -} - -/// Test tasks will abort on failure instead of unwinding -pub fn spawntask(f: proc()) { - Scheduler::run_task(Task::build_child(None, f)); -} - -/// Create a new task and run it right now. Aborts on failure -pub fn spawntask_later(f: proc()) { - Scheduler::run_task_later(Task::build_child(None, f)); -} - -pub fn spawntask_random(f: proc()) { - use rand::{Rand, rng}; - - let mut rng = rng(); - let run_now: bool = Rand::rand(&mut rng); - - if run_now { - spawntask(f) - } else { - spawntask_later(f) - } -} - -pub fn spawntask_try(f: proc()) -> Result<(),()> { - - let (port, chan) = Chan::new(); - let on_exit: proc(TaskResult) = proc(exit_status) { - chan.send(exit_status) - }; - - let mut new_task = Task::build_root(None, f); - new_task.death.on_exit = Some(on_exit); - - Scheduler::run_task(new_task); - - let exit_status = port.recv(); - if exit_status.is_ok() { Ok(()) } else { Err(()) } - -} - -/// Spawn a new task in a new scheduler and return a thread handle. -pub fn spawntask_thread(f: proc()) -> Thread<()> { - let thread = do Thread::start { - run_in_newsched_task_core(f); - }; - - return thread; -} - -/// Get a ~Task for testing purposes other than actually scheduling it. -pub fn with_test_task(blk: proc(~Task) -> ~Task) { - do run_in_bare_thread { - let mut sched = ~new_test_sched(); - let task = blk(~Task::new_root(&mut sched.stack_pool, - None, - proc() {})); - cleanup_task(task); - } -} - -/// Use to cleanup tasks created for testing but not "run". 
-pub fn cleanup_task(mut task: ~Task) { - task.destroyed = true; -} - -/// Get a port number, starting at 9600, for use in tests -pub fn next_test_port() -> u16 { - use unstable::mutex::{Mutex, MUTEX_INIT}; - static mut lock: Mutex = MUTEX_INIT; - static mut next_offset: u16 = 0; - unsafe { - let base = base_port(); - lock.lock(); - let ret = base + next_offset; - next_offset += 1; - lock.unlock(); - return ret; - } -} - -/// Get a temporary path which could be the location of a unix socket -pub fn next_test_unix() -> Path { - if cfg!(unix) { - os::tmpdir().join(rand::task_rng().gen_ascii_str(20)) - } else { - Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20)) - } -} - -/// Get a unique IPv4 localhost:port pair starting at 9600 -pub fn next_test_ip4() -> SocketAddr { - SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() } -} - -/// Get a unique IPv6 localhost:port pair starting at 9600 -pub fn next_test_ip6() -> SocketAddr { - SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() } -} - -/* -XXX: Welcome to MegaHack City. - -The bots run multiple builds at the same time, and these builds -all want to use ports. This function figures out which workspace -it is running in and assigns a port range based on it. -*/ -fn base_port() -> u16 { - use os; - use str::StrSlice; - use vec::ImmutableVector; - - let base = 9600u16; - let range = 1000u16; - - let bases = [ - ("32-opt", base + range * 1), - ("32-noopt", base + range * 2), - ("64-opt", base + range * 3), - ("64-noopt", base + range * 4), - ("64-opt-vg", base + range * 5), - ("all-opt", base + range * 6), - ("snap3", base + range * 7), - ("dist", base + range * 8) - ]; - - // FIXME (#9639): This needs to handle non-utf8 paths - let path = os::getcwd(); - let path_s = path.as_str().unwrap(); - - let mut final_base = base; - - for &(dir, base) in bases.iter() { - if path_s.contains(dir) { - final_base = base; - break; - } - } - - return final_base; -} - -/// Get a constant that represents the number of times to repeat -/// stress tests. Default 1. -pub fn stress_factor() -> uint { - use os::getenv; - use from_str::from_str; - - match getenv("RUST_RT_STRESS") { - Some(val) => from_str::(val).unwrap(), - None => 1 - } -} diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs index 6128f310a2ebf..f4f4aaa276524 100644 --- a/src/libstd/rt/thread.rs +++ b/src/libstd/rt/thread.rs @@ -33,7 +33,7 @@ pub struct Thread { priv packet: ~Option, } -static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024; +static DEFAULT_STACK_SIZE: uint = 1024 * 1024; // This is the starting point of rust os threads. The first thing we do // is make sure that we don't trigger __morestack (also why this has a @@ -41,9 +41,9 @@ static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024; // and invoke it. #[no_split_stack] extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return { - use rt::context; + use unstable::stack; unsafe { - context::record_stack_bounds(0, uint::max_value); + stack::record_stack_bounds(0, uint::max_value); let f: ~proc() = cast::transmute(main); (*f)(); cast::transmute(0 as imp::rust_thread_return) @@ -69,6 +69,12 @@ impl Thread<()> { /// called, when the `Thread` falls out of scope its destructor will block /// waiting for the OS thread. pub fn start(main: proc() -> T) -> Thread { + Thread::start_stack(DEFAULT_STACK_SIZE, main) + } + + /// Performs the same functionality as `start`, but specifies an explicit + /// stack size for the new thread. 
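The pair of additions here (`start_stack`, whose body follows, and `spawn_stack` further down) gives callers explicit control over the stack size of a raw OS thread. A usage sketch, not part of the patch, with an illustrative 64 KiB size:

```rust
use std::rt::thread::Thread;

fn main() {
    // Joinable thread with an explicit 64 KiB stack; `join` blocks until
    // the proc finishes and returns its value.
    let t = Thread::start_stack(64 * 1024, proc() { 1 + 1 });
    assert_eq!(t.join(), 2);

    // Detached thread: runs on its own OS thread, and nothing waits for it.
    Thread::spawn_stack(64 * 1024, proc() {
        // ... background work ...
    });
}
```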
+ pub fn start_stack(stack: uint, main: proc() -> T) -> Thread { // We need the address of the packet to fill in to be stable so when // `main` fills it in it's still valid, so allocate an extra ~ box to do @@ -78,7 +84,7 @@ impl Thread<()> { *cast::transmute::<&~Option, **mut Option>(&packet) }; let main: proc() = proc() unsafe { *packet2 = Some(main()); }; - let native = unsafe { imp::create(~main) }; + let native = unsafe { imp::create(stack, ~main) }; Thread { native: native, @@ -94,8 +100,14 @@ impl Thread<()> { /// systems. Note that platforms may not keep the main program alive even if /// there are detached thread still running around. pub fn spawn(main: proc()) { + Thread::spawn_stack(DEFAULT_STACK_SIZE, main) + } + + /// Performs the same functionality as `spawn`, but explicitly specifies a + /// stack size for the new thread. + pub fn spawn_stack(stack: uint, main: proc()) { unsafe { - let handle = imp::create(~main); + let handle = imp::create(stack, ~main); imp::detach(handle); } } @@ -132,8 +144,6 @@ impl Drop for Thread { #[cfg(windows)] mod imp { - use super::DEFAULT_STACK_SIZE; - use cast; use libc; use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL, @@ -143,9 +153,9 @@ mod imp { pub type rust_thread = HANDLE; pub type rust_thread_return = DWORD; - pub unsafe fn create(p: ~proc()) -> rust_thread { + pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread { let arg: *mut libc::c_void = cast::transmute(p); - CreateThread(ptr::mut_null(), DEFAULT_STACK_SIZE, super::thread_start, + CreateThread(ptr::mut_null(), stack as libc::size_t, super::thread_start, arg, 0, ptr::mut_null()) } @@ -183,17 +193,17 @@ mod imp { use libc::consts::os::posix01::PTHREAD_CREATE_JOINABLE; use libc; use ptr; - use super::DEFAULT_STACK_SIZE; use unstable::intrinsics; pub type rust_thread = libc::pthread_t; pub type rust_thread_return = *libc::c_void; - pub unsafe fn create(p: ~proc()) -> rust_thread { + pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread { let mut native: libc::pthread_t = intrinsics::uninit(); let mut attr: libc::pthread_attr_t = intrinsics::uninit(); assert_eq!(pthread_attr_init(&mut attr), 0); - assert_eq!(pthread_attr_setstacksize(&mut attr, DEFAULT_STACK_SIZE), 0); + assert_eq!(pthread_attr_setstacksize(&mut attr, + stack as libc::size_t), 0); assert_eq!(pthread_attr_setdetachstate(&mut attr, PTHREAD_CREATE_JOINABLE), 0); diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs deleted file mode 100644 index 5e867bcdfbac6..0000000000000 --- a/src/libstd/rt/tube.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A very simple unsynchronized channel type for sending buffered data from -//! scheduler context to task context. -//! -//! 
XXX: This would be safer to use if split into two types like Port/Chan - -use option::*; -use clone::Clone; -use super::rc::RC; -use rt::sched::Scheduler; -use rt::kill::BlockedTask; -use rt::local::Local; -use vec::OwnedVector; -use container::Container; - -struct TubeState { - blocked_task: Option, - buf: ~[T] -} - -pub struct Tube { - priv p: RC> -} - -impl Tube { - pub fn new() -> Tube { - Tube { - p: RC::new(TubeState { - blocked_task: None, - buf: ~[] - }) - } - } - - pub fn send(&mut self, val: T) { - rtdebug!("tube send"); - unsafe { - let state = self.p.unsafe_borrow_mut(); - (*state).buf.push(val); - - if (*state).blocked_task.is_some() { - // There's a waiting task. Wake it up - rtdebug!("waking blocked tube"); - let task = (*state).blocked_task.take_unwrap(); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(task); - } - } - } - - pub fn recv(&mut self) -> T { - unsafe { - let state = self.p.unsafe_borrow_mut(); - if !(*state).buf.is_empty() { - return (*state).buf.shift(); - } else { - // Block and wait for the next message - rtdebug!("blocking on tube recv"); - assert!(self.p.refcount() > 1); // There better be somebody to wake us up - assert!((*state).blocked_task.is_none()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|_, task| { - (*state).blocked_task = Some(task); - }); - rtdebug!("waking after tube recv"); - let buf = &mut (*state).buf; - assert!(!buf.is_empty()); - return buf.shift(); - } - } - } -} - -impl Clone for Tube { - fn clone(&self) -> Tube { - Tube { p: self.p.clone() } - } -} - -#[cfg(test)] -mod test { - use rt::test::*; - use rt::rtio::EventLoop; - use rt::sched::Scheduler; - use rt::local::Local; - use super::*; - use prelude::*; - - #[test] - fn simple_test() { - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - let mut tube_clone = tube_clone.take_unwrap(); - tube_clone.send(1); - sched.enqueue_blocked_task(task); - }); - - assert!(tube.recv() == 1); - } - } - - #[test] - fn blocking_test() { - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - let tube_clone = tube_clone.take_unwrap(); - do sched.event_loop.callback { - let mut tube_clone = tube_clone; - // The task should be blocked on this now and - // sending will wake it up. - tube_clone.send(1); - } - sched.enqueue_blocked_task(task); - }); - - assert!(tube.recv() == 1); - } - } - - #[test] - fn many_blocking_test() { - static MAX: int = 100; - - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - callback_send(tube_clone.take_unwrap(), 0); - - fn callback_send(tube: Tube, i: int) { - if i == 100 { - return - } - - let mut sched = Local::borrow(None::); - do sched.get().event_loop.callback { - let mut tube = tube; - // The task should be blocked on this now and - // sending will wake it up. 
- tube.send(i); - callback_send(tube, i + 1); - } - } - - sched.enqueue_blocked_task(task); - }); - - for i in range(0, MAX) { - let j = tube.recv(); - assert!(j == i); - } - } - } -} diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs index 3f6f54a9c0eee..9706dbae4c61b 100644 --- a/src/libstd/rt/unwind.rs +++ b/src/libstd/rt/unwind.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - // Implementation of Rust stack unwinding // -// For background on exception handling and stack unwinding please see "Exception Handling in LLVM" -// (llvm.org/docs/ExceptionHandling.html) and documents linked from it. +// For background on exception handling and stack unwinding please see +// "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and +// documents linked from it. // These are also good reads: // http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/ // http://monoinfinito.wordpress.com/series/exception-handling-in-c/ @@ -21,41 +21,55 @@ // ~~~ A brief summary ~~~ // Exception handling happens in two phases: a search phase and a cleanup phase. // -// In both phases the unwinder walks stack frames from top to bottom using information from -// the stack frame unwind sections of the current process's modules ("module" here refers to -// an OS module, i.e. an executable or a dynamic library). +// In both phases the unwinder walks stack frames from top to bottom using +// information from the stack frame unwind sections of the current process's +// modules ("module" here refers to an OS module, i.e. an executable or a +// dynamic library). // -// For each stack frame, it invokes the associated "personality routine", whose address is also -// stored in the unwind info section. +// For each stack frame, it invokes the associated "personality routine", whose +// address is also stored in the unwind info section. // -// In the search phase, the job of a personality routine is to examine exception object being -// thrown, and to decide whether it should be caught at that stack frame. Once the handler frame -// has been identified, cleanup phase begins. +// In the search phase, the job of a personality routine is to examine exception +// object being thrown, and to decide whether it should be caught at that stack +// frame. Once the handler frame has been identified, cleanup phase begins. // -// In the cleanup phase, personality routines invoke cleanup code associated with their -// stack frames (i.e. destructors). Once stack has been unwound down to the handler frame level, -// unwinding stops and the last personality routine transfers control to its' catch block. +// In the cleanup phase, personality routines invoke cleanup code associated +// with their stack frames (i.e. destructors). Once stack has been unwound down +// to the handler frame level, unwinding stops and the last personality routine +// transfers control to its' catch block. // // ~~~ Frame unwind info registration ~~~ -// Each module has its' own frame unwind info section (usually ".eh_frame"), and unwinder needs -// to know about all of them in order for unwinding to be able to cross module boundaries. +// Each module has its' own frame unwind info section (usually ".eh_frame"), and +// unwinder needs to know about all of them in order for unwinding to be able to +// cross module boundaries. 
// -// On some platforms, like Linux, this is achieved by dynamically enumerating currently loaded -// modules via the dl_iterate_phdr() API and finding all .eh_frame sections. +// On some platforms, like Linux, this is achieved by dynamically enumerating +// currently loaded modules via the dl_iterate_phdr() API and finding all +// .eh_frame sections. // -// Others, like Windows, require modules to actively register their unwind info sections by calling -// __register_frame_info() API at startup. -// In the latter case it is essential that there is only one copy of the unwinder runtime -// in the process. This is usually achieved by linking to the dynamic version of the unwind -// runtime. +// Others, like Windows, require modules to actively register their unwind info +// sections by calling __register_frame_info() API at startup. In the latter +// case it is essential that there is only one copy of the unwinder runtime in +// the process. This is usually achieved by linking to the dynamic version of +// the unwind runtime. // // Currently Rust uses unwind runtime provided by libgcc. -use prelude::*; -use cast::transmute; -use task::TaskResult; +use any::{Any, AnyRefExt}; +use c_str::CString; +use cast; +use kinds::Send; +use libc::{c_char, size_t}; use libc::{c_void, c_int}; -use self::libunwind::*; +use option::{Some, None, Option}; +use result::{Err, Ok}; +use rt::local::Local; +use rt::task::Task; +use str::Str; +use task::TaskResult; +use unstable::intrinsics; + +use uw = self::libunwind; mod libunwind { //! Unwind library interface @@ -110,34 +124,41 @@ mod libunwind { } pub struct Unwinder { - unwinding: bool, - cause: Option<~Any> + priv unwinding: bool, + priv cause: Option<~Any> } impl Unwinder { + pub fn new() -> Unwinder { + Unwinder { + unwinding: false, + cause: None, + } + } + + pub fn unwinding(&self) -> bool { + self.unwinding + } pub fn try(&mut self, f: ||) { use unstable::raw::Closure; unsafe { - let closure: Closure = transmute(f); - let code = transmute(closure.code); - let env = transmute(closure.env); - - let ep = rust_try(try_fn, code, env); + let closure: Closure = cast::transmute(f); + let ep = rust_try(try_fn, closure.code as *c_void, + closure.env as *c_void); if !ep.is_null() { rtdebug!("Caught {}", (*ep).exception_class); - _Unwind_DeleteException(ep); + uw::_Unwind_DeleteException(ep); } } extern fn try_fn(code: *c_void, env: *c_void) { unsafe { - let closure: Closure = Closure { - code: transmute(code), - env: transmute(env), - }; - let closure: || = transmute(closure); + let closure: || = cast::transmute(Closure { + code: code as *(), + env: env as *(), + }); closure(); } } @@ -145,10 +166,11 @@ impl Unwinder { extern { // Rust's try-catch // When f(...) returns normally, the return value is null. - // When f(...) throws, the return value is a pointer to the caught exception object. + // When f(...) throws, the return value is a pointer to the caught + // exception object. 
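Before the `rust_try` declaration that follows, a sketch (hypothetical driver code, not from the patch) of how these pieces fit together: the task's catch-all frame wraps user code in `try`, and a later `fail!()` reaches `begin_unwind`, which raises the exception that `try` catches.

```rust
use std::rt::unwind::Unwinder; // runtime-internal module; shown for illustration

fn run_task_body() { fail!("boom") } // stand-in for the user's task body

fn main() {
    let mut unwinder = Unwinder::new();
    unwinder.try(|| run_task_body());
    // The failure was caught at this frame rather than propagating; the
    // cause is stashed away for the task's exit status.
    assert!(unwinder.unwinding());
}
```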
fn rust_try(f: extern "C" fn(*c_void, *c_void), code: *c_void, - data: *c_void) -> *_Unwind_Exception; + data: *c_void) -> *uw::_Unwind_Exception; } } @@ -159,21 +181,21 @@ impl Unwinder { self.cause = Some(cause); unsafe { - let exception = ~_Unwind_Exception { + let exception = ~uw::_Unwind_Exception { exception_class: rust_exception_class(), exception_cleanup: exception_cleanup, private_1: 0, private_2: 0 }; - let error = _Unwind_RaiseException(transmute(exception)); + let error = uw::_Unwind_RaiseException(cast::transmute(exception)); rtabort!("Could not unwind stack, error = {}", error as int) } - extern "C" fn exception_cleanup(_unwind_code: _Unwind_Reason_Code, - exception: *_Unwind_Exception) { + extern "C" fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code, + exception: *uw::_Unwind_Exception) { rtdebug!("exception_cleanup()"); unsafe { - let _: ~_Unwind_Exception = transmute(exception); + let _: ~uw::_Unwind_Exception = cast::transmute(exception); } } } @@ -189,68 +211,146 @@ impl Unwinder { // Rust's exception class identifier. This is used by personality routines to // determine whether the exception was thrown by their own runtime. -fn rust_exception_class() -> _Unwind_Exception_Class { - let bytes = bytes!("MOZ\0RUST"); // vendor, language - unsafe { - let ptr: *_Unwind_Exception_Class = transmute(bytes.as_ptr()); - *ptr - } +fn rust_exception_class() -> uw::_Unwind_Exception_Class { + // M O Z \0 R U S T -- vendor, language + 0x4d4f5a_00_52555354 } - -// We could implement our personality routine in pure Rust, however exception info decoding -// is tedious. More importantly, personality routines have to handle various platform -// quirks, which are not fun to maintain. For this reason, we attempt to reuse personality -// routine of the C language: __gcc_personality_v0. +// We could implement our personality routine in pure Rust, however exception +// info decoding is tedious. More importantly, personality routines have to +// handle various platform quirks, which are not fun to maintain. For this +// reason, we attempt to reuse personality routine of the C language: +// __gcc_personality_v0. // -// Since C does not support exception catching, __gcc_personality_v0 simply always -// returns _URC_CONTINUE_UNWIND in search phase, and always returns _URC_INSTALL_CONTEXT -// (i.e. "invoke cleanup code") in cleanup phase. +// Since C does not support exception catching, __gcc_personality_v0 simply +// always returns _URC_CONTINUE_UNWIND in search phase, and always returns +// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase. // -// This is pretty close to Rust's exception handling approach, except that Rust does have -// a single "catch-all" handler at the bottom of each task's stack. +// This is pretty close to Rust's exception handling approach, except that Rust +// does have a single "catch-all" handler at the bottom of each task's stack. // So we have two versions: -// - rust_eh_personality, used by all cleanup landing pads, which never catches, so -// the behavior of __gcc_personality_v0 is perfectly adequate there, and -// - rust_eh_personality_catch, used only by rust_try(), which always catches. This is -// achieved by overriding the return value in search phase to always say "catch!". +// - rust_eh_personality, used by all cleanup landing pads, which never catches, +// so the behavior of __gcc_personality_v0 is perfectly adequate there, and +// - rust_eh_personality_catch, used only by rust_try(), which always catches. 
+// This is achieved by overriding the return value in search phase to always +// say "catch!". extern "C" { fn __gcc_personality_v0(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code; + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code; } #[lang="eh_personality"] #[no_mangle] // so we can reference it by name from middle/trans/base.rs #[doc(hidden)] #[cfg(not(test))] -pub extern "C" fn rust_eh_personality(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code { +pub extern "C" fn rust_eh_personality( + version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context +) -> uw::_Unwind_Reason_Code +{ unsafe { - __gcc_personality_v0(version, actions, exception_class, ue_header, context) + __gcc_personality_v0(version, actions, exception_class, ue_header, + context) } } #[no_mangle] // referenced from rust_try.ll #[doc(hidden)] #[cfg(not(test))] -pub extern "C" fn rust_eh_personality_catch(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code { - if (actions as c_int & _UA_SEARCH_PHASE as c_int) != 0 { // search phase - _URC_HANDLER_FOUND // catch! +pub extern "C" fn rust_eh_personality_catch( + version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context +) -> uw::_Unwind_Reason_Code +{ + if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase + uw::_URC_HANDLER_FOUND // catch! } else { // cleanup phase unsafe { - __gcc_personality_v0(version, actions, exception_class, ue_header, context) + __gcc_personality_v0(version, actions, exception_class, ue_header, + context) } } } + +/// This is the entry point of unwinding for things like lang items and such. +/// The arguments are normally generated by the compiler, and need to +/// have static lifetimes. +pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! { + #[inline] + fn static_char_ptr(p: *c_char) -> &'static str { + let s = unsafe { CString::new(p, false) }; + match s.as_str() { + Some(s) => unsafe { cast::transmute::<&str, &'static str>(s) }, + None => rtabort!("message wasn't utf8?") + } + } + + let msg = static_char_ptr(msg); + let file = static_char_ptr(file); + + begin_unwind(msg, file, line as uint) +} + +/// This is the entry point of unwinding for fail!() and assert!(). +pub fn begin_unwind(msg: M, file: &'static str, line: uint) -> ! { + unsafe { + let task: *mut Task; + // Note that this should be the only allocation performed in this block. + // Currently this means that fail!() on OOM will invoke this code path, + // but then again we're not really ready for failing on OOM anyway. If + // we do start doing this, then we should propagate this allocation to + // be performed in the parent of this task instead of the task that's + // failing. 
+ let msg = ~msg as ~Any; + + { + let msg_s = match msg.as_ref::<&'static str>() { + Some(s) => *s, + None => match msg.as_ref::<~str>() { + Some(s) => s.as_slice(), + None => "~Any", + } + }; + + // It is assumed that all reasonable rust code will have a local + // task at all times. This means that this `try_unsafe_borrow` will + // succeed almost all of the time. There are border cases, however, + // when the runtime has *almost* set up the local task, but hasn't + // quite gotten there yet. In order to get some better diagnostics, + // we print on failure and immediately abort the whole process if + // there is no local task available. + match Local::try_unsafe_borrow() { + Some(t) => { + task = t; + let n = (*task).name.as_ref() + .map(|n| n.as_slice()).unwrap_or(""); + + rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, + file, line); + } + None => { + rterrln!("failed at '{}', {}:{}", msg_s, file, line); + intrinsics::abort(); + } + } + + if (*task).unwinder.unwinding { + rtabort!("unwinding again"); + } + } + + (*task).unwinder.begin_unwind(msg); + } +} diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 93721986f3c25..730a38ce8867d 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -15,7 +15,6 @@ use libc; use option::{Some, None, Option}; use os; use str::StrSlice; -use unstable::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst}; use unstable::running_on_valgrind; // Indicates whether we should perform expensive sanity checks, including rtassert! @@ -68,11 +67,21 @@ pub fn default_sched_threads() -> uint { } pub fn dumb_println(args: &fmt::Arguments) { - use io::native::file::FileDesc; use io; use libc; - let mut out = FileDesc::new(libc::STDERR_FILENO, false); - fmt::writeln(&mut out as &mut io::Writer, args); + + struct Stderr; + impl io::Writer for Stderr { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDERR_FILENO, + data.as_ptr() as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut w = Stderr; + fmt::writeln(&mut w as &mut io::Writer, args); } pub fn abort(msg: &str) -> ! { @@ -133,13 +142,3 @@ memory and partly incapable of presentation to others.", unsafe { libc::abort() } } } - -static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT; - -pub fn set_exit_status(code: int) { - unsafe { EXIT_STATUS.store(code, SeqCst) } -} - -pub fn get_exit_status() -> int { - unsafe { EXIT_STATUS.load(SeqCst) } -} diff --git a/src/libstd/run.rs b/src/libstd/run.rs index d92291bbfbd06..69704c855ee8f 100644 --- a/src/libstd/run.rs +++ b/src/libstd/run.rs @@ -338,8 +338,8 @@ mod tests { use str; use task::spawn; use unstable::running_on_valgrind; - use io::native::file; - use io::{FileNotFound, Reader, Writer, io_error}; + use io::pipe::PipeStream; + use io::{Writer, Reader, io_error, FileNotFound, OtherIoError}; #[test] #[cfg(not(target_os="android"))] // FIXME(#10380) @@ -426,13 +426,13 @@ mod tests { } fn writeclose(fd: c_int, s: &str) { - let mut writer = file::FileDesc::new(fd, true); + let mut writer = PipeStream::open(fd); writer.write(s.as_bytes()); } fn readclose(fd: c_int) -> ~str { let mut res = ~[]; - let mut reader = file::FileDesc::new(fd, true); + let mut reader = PipeStream::open(fd); let mut buf = [0, ..1024]; loop { match reader.read(buf) { diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs new file mode 100644 index 0000000000000..7b94a3acc2b7d --- /dev/null +++ b/src/libstd/sync/arc.rs @@ -0,0 +1,152 @@ +// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Atomically reference counted data
+//!
+//! This module contains the implementation of an atomically reference counted
+//! pointer for the purpose of sharing data between tasks. This is obviously a
+//! very unsafe primitive to use, but it has its use cases when implementing
+//! concurrent data structures and similar tasks.
+//!
+//! Great care must be taken to ensure that data races do not arise through the
+//! usage of `UnsafeArc`, and this often requires some form of external
+//! synchronization. The only guarantee provided to you by this type is that
+//! the underlying data will remain valid (not free'd) so long as the reference
+//! count is greater than zero.
+
+use cast;
+use clone::Clone;
+use kinds::Send;
+use ops::Drop;
+use ptr::RawPtr;
+use sync::atomics::{AtomicUint, SeqCst, Relaxed, Acquire};
+use vec;
+
+/// An atomically reference counted pointer.
+///
+/// Enforces no shared-memory safety.
+#[unsafe_no_drop_flag]
+pub struct UnsafeArc<T> {
+    priv data: *mut ArcData<T>,
+}
+
+struct ArcData<T> {
+    count: AtomicUint,
+    data: T,
+}
+
+unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
+    let data = ~ArcData { count: AtomicUint::new(refcount), data: data };
+    cast::transmute(data)
+}
+
+impl<T: Send> UnsafeArc<T> {
+    /// Creates a new `UnsafeArc` which wraps the given data.
+    pub fn new(data: T) -> UnsafeArc<T> {
+        unsafe { UnsafeArc { data: new_inner(data, 1) } }
+    }
+
+    /// As new(), but returns an extra pre-cloned handle.
+    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
+        unsafe {
+            let ptr = new_inner(data, 2);
+            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
+        }
+    }
+
+    /// As new(), but returns a vector of as many pre-cloned handles as
+    /// requested.
+    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
+        unsafe {
+            if num_handles == 0 {
+                ~[] // need to free data here
+            } else {
+                let ptr = new_inner(data, num_handles);
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
+            }
+        }
+    }
+
+    /// Gets a pointer to the inner shared data. Note that care must be taken
+    /// to ensure that the outer `UnsafeArc` does not fall out of scope while
+    /// this pointer is in use, otherwise it could possibly result in a
+    /// use-after-free.
+    #[inline]
+    pub fn get(&self) -> *mut T {
+        unsafe {
+            assert!((*self.data).count.load(Relaxed) > 0);
+            return &mut (*self.data).data as *mut T;
+        }
+    }
+
+    /// Gets an immutable pointer to the inner shared data. This has the same
+    /// caveats as the `get` method.
+    #[inline]
+    pub fn get_immut(&self) -> *T {
+        unsafe {
+            assert!((*self.data).count.load(Relaxed) > 0);
+            return &(*self.data).data as *T;
+        }
+    }
+}
+
+impl<T: Send> Clone for UnsafeArc<T> {
+    fn clone(&self) -> UnsafeArc<T> {
+        unsafe {
+            // This barrier might be unnecessary, but I'm not sure...
+            let old_count = (*self.data).count.fetch_add(1, Acquire);
+            assert!(old_count >= 1);
+            return UnsafeArc { data: self.data };
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for UnsafeArc<T> {
+    fn drop(&mut self) {
+        unsafe {
+            // Happens when destructing an unwrapper's handle and from
+            // `#[unsafe_no_drop_flag]`
+            if self.data.is_null() {
+                return
+            }
+            // Must be acquire+release, not just release, to make sure this
+            // doesn't get reordered to after the unwrapper pointer load.
+            let old_count = (*self.data).count.fetch_sub(1, SeqCst);
+            assert!(old_count >= 1);
+            if old_count == 1 {
+                let _: ~ArcData<T> = cast::transmute(self.data);
+            }
+        }
+    }
+}
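A minimal usage sketch for `UnsafeArc` (not part of the patch). All access to the contents goes through raw pointers, so correctness relies on external synchronization; here both handles stay on one task:

```rust
use std::sync::arc::UnsafeArc;

fn main() {
    let (arc1, arc2) = UnsafeArc::new2(5); // two pre-cloned handles
    unsafe {
        // Both handles point at the same heap-allocated ArcData.
        *arc1.get() = 10;
        assert_eq!(*arc2.get(), 10);
    }
    // Dropping the second (last) handle frees the shared allocation.
}
```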
+
+#[cfg(test)]
+mod tests {
+    use prelude::*;
+    use super::UnsafeArc;
+    use mem::size_of;
+
+    #[test]
+    fn test_size() {
+        assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(),
+                   size_of::<*[int, ..10]>());
+    }
+
+    #[test]
+    fn arclike_newN() {
+        // Tests that the many-refcounts-at-once constructors don't leak.
+        let _ = UnsafeArc::new2(~~"hello");
+        let x = UnsafeArc::newN(~~"hello", 0);
+        assert_eq!(x.len(), 0);
+        let x = UnsafeArc::newN(~~"hello", 1);
+        assert_eq!(x.len(), 1);
+        let x = UnsafeArc::newN(~~"hello", 10);
+        assert_eq!(x.len(), 10);
+    }
+}
diff --git a/src/libstd/unstable/atomics.rs b/src/libstd/sync/atomics.rs
similarity index 98%
rename from src/libstd/unstable/atomics.rs
rename to src/libstd/sync/atomics.rs
index 9aaccb3ebbac4..bc9d99c0f37d7 100644
--- a/src/libstd/unstable/atomics.rs
+++ b/src/libstd/sync/atomics.rs
@@ -11,13 +11,16 @@
 /*!
  * Atomic types
  *
- * Basic atomic types supporting atomic operations. Each method takes an `Ordering` which
- * represents the strength of the memory barrier for that operation. These orderings are the same
- * as C++11 atomic orderings [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
+ * Basic atomic types supporting atomic operations. Each method takes an
+ * `Ordering` which represents the strength of the memory barrier for that
+ * operation. These orderings are the same as C++11 atomic orderings
+ * [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
  *
  * All atomic types are a single word in size.
  */
 
+#[allow(missing_doc)];
+
 use unstable::intrinsics;
 use cast;
 use option::{Option,Some,None};
diff --git a/src/libstd/rt/deque.rs b/src/libstd/sync/deque.rs
similarity index 98%
rename from src/libstd/rt/deque.rs
rename to src/libstd/sync/deque.rs
index 770fc9ffa12e2..4d0efcd6ee10a 100644
--- a/src/libstd/rt/deque.rs
+++ b/src/libstd/sync/deque.rs
@@ -50,15 +50,18 @@
 use cast;
 use clone::Clone;
-use iter::range;
+use iter::{range, Iterator};
 use kinds::Send;
 use libc;
 use mem;
 use ops::Drop;
 use option::{Option, Some, None};
 use ptr;
-use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
-use unstable::sync::{UnsafeArc, Exclusive};
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
+use unstable::sync::Exclusive;
+use vec::{OwnedVector, ImmutableVector};
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
 // the deque requires that this number be less than 2.
@@ -399,8 +402,8 @@ mod tests {
     use rt::thread::Thread;
     use rand;
     use rand::Rng;
-    use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
-                            AtomicUint, INIT_ATOMIC_UINT};
+    use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+                        AtomicUint, INIT_ATOMIC_UINT};
     use vec;
 
     #[test]
diff --git a/src/test/run-pass/rt-run-twice.rs b/src/libstd/sync/mod.rs
similarity index 52%
rename from src/test/run-pass/rt-run-twice.rs
rename to src/libstd/sync/mod.rs
index a9a26c2fbb14a..3213c538152c6 100644
--- a/src/test/run-pass/rt-run-twice.rs
+++ b/src/libstd/sync/mod.rs
@@ -8,19 +8,16 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// xfail-fast make-check does not like `#[start]`
+//! Useful synchronization primitives
+//!
+//! This module contains useful safe and unsafe synchronization primitives.
+//! Most of the primitives in this module do not provide any sort of locking
+//! and/or blocking at all, but rather provide the necessary tools to build
+//! other types of concurrent primitives.
 
-use std::rt;
-
-#[start]
-fn start(argc: int, argv: **u8) -> int {
-    do rt::start(argc, argv) {
-        println("First invocation");
-    };
-
-    do rt::start(argc, argv) {
-        println("Second invocation");
-    };
-
-    0
-}
+pub mod arc;
+pub mod atomics;
+pub mod deque;
+pub mod mpmc_bounded_queue;
+pub mod mpsc_queue;
+pub mod spsc_queue;
diff --git a/src/libstd/rt/mpmc_bounded_queue.rs b/src/libstd/sync/mpmc_bounded_queue.rs
similarity index 93%
rename from src/libstd/rt/mpmc_bounded_queue.rs
rename to src/libstd/sync/mpmc_bounded_queue.rs
index 25a3ba8ab48f1..fe51de4e42d06 100644
--- a/src/libstd/rt/mpmc_bounded_queue.rs
+++ b/src/libstd/sync/mpmc_bounded_queue.rs
@@ -25,15 +25,17 @@
  * policies, either expressed or implied, of Dmitry Vyukov.
  */
 
+#[allow(missing_doc, dead_code)];
+
 // http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
 
-use unstable::sync::UnsafeArc;
-use unstable::atomics::{AtomicUint,Relaxed,Release,Acquire};
-use option::*;
-use vec;
 use clone::Clone;
 use kinds::Send;
 use num::{Exponential,Algebraic,Round};
+use option::{Option, Some, None};
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
+use vec;
 
 struct Node<T> {
     sequence: AtomicUint,
@@ -161,8 +163,8 @@ impl<T: Send> Clone for Queue<T> {
 mod tests {
     use prelude::*;
     use option::*;
-    use task;
     use super::Queue;
+    use native;
 
     #[test]
     fn test() {
@@ -170,14 +172,17 @@
         let nmsgs = 1000u;
         let mut q = Queue::with_capacity(nthreads*nmsgs);
         assert_eq!(None, q.pop());
+        let (port, chan) = SharedChan::new();
 
         for _ in range(0, nthreads) {
             let q = q.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            let chan = chan.clone();
+            do native::task::spawn {
                 let mut q = q;
                 for i in range(0, nmsgs) {
                     assert!(q.push(i));
                 }
+                chan.send(());
             }
         }
 
@@ -186,7 +191,7 @@
             let (completion_port, completion_chan) = Chan::new();
             completion_ports.push(completion_port);
             let q = q.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            do native::task::spawn {
                 let mut q = q;
                 let mut i = 0u;
                 loop {
@@ -205,5 +210,8 @@
         for completion_port in completion_ports.mut_iter() {
             assert_eq!(nmsgs, completion_port.recv());
         }
+        for _ in range(0, nthreads) {
+            port.recv();
+        }
     }
 }
diff --git a/src/libstd/rt/mpsc_queue.rs b/src/libstd/sync/mpsc_queue.rs
similarity index 69%
rename from src/libstd/rt/mpsc_queue.rs
rename to src/libstd/sync/mpsc_queue.rs
index d575028af7043..a249d6ed2e8ce 100644
--- a/src/libstd/rt/mpsc_queue.rs
+++ b/src/libstd/sync/mpsc_queue.rs
@@ -26,6 +26,14 @@
  */
 
 //! A mostly lock-free multi-producer, single consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between tasks, and is also used as the
+//! building block of channels in Rust.
+//!
+//! Note that the current implementation of this queue has a caveat on the
+//! `pop` method; see that method's documentation for details. Due to this
+//! caveat, this queue may not be appropriate for all use-cases.
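A sketch of how the queue is used once it lands at `std::sync::mpsc_queue` (single consumer, any number of cloned producers; the `()` packet means no extra shared state), not part of the patch:

```rust
use std::sync::mpsc_queue::{queue, Data, Empty, Inconsistent};

fn main() {
    let (mut cons, prod) = queue(());
    let mut prod2 = prod.clone(); // producers may be cloned freely
    prod2.push(1);

    match cons.pop() {
        Data(i) => assert_eq!(i, 1),
        // Empty: definitely no data. Inconsistent: data exists, but a
        // pusher was pre-empted mid-push; retry soon (or use casual_pop).
        Empty | Inconsistent => fail!("no data yet"),
    }
}
```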
 // http://www.1024cores.net/home/lock-free-algorithms
 // /queues/non-intrusive-mpsc-node-based-queue
 
@@ -35,9 +43,11 @@
 use clone::Clone;
 use kinds::Send;
 use ops::Drop;
 use option::{Option, None, Some};
-use unstable::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
-use unstable::sync::UnsafeArc;
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
 
+/// A result of the `pop` function.
 pub enum PopResult<T> {
     /// Some data has been popped
     Data(T),
@@ -61,10 +71,14 @@ struct State<T, P> {
     packet: P,
 }
 
+/// The consumer half of this concurrent queue. This half is used to receive
+/// data from the producers.
 pub struct Consumer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// The production half of the concurrent queue. This handle may be cloned in
+/// order to make handles for new producers.
 pub struct Producer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
@@ -75,6 +89,11 @@ impl<T: Send, P: Send> Clone for Producer<T, P> {
     }
 }
 
+/// Creates a new MPSC queue. The given argument `p` is a user-defined "packet"
+/// of information which will be shared by the consumer and the producer, and
+/// can be re-acquired via the `packet` function. This is helpful when extra
+/// state is shared between the producer and consumer, but note that there is
+/// no synchronization performed on this data.
 pub fn queue<T: Send, P: Send>(p: P) -> (Consumer<T, P>, Producer<T, P>) {
     unsafe {
         let (a, b) = UnsafeArc::new2(State::new(p));
@@ -92,7 +111,7 @@ impl<T> Node<T> {
 }
 
 impl<T: Send, P: Send> State<T, P> {
-    pub unsafe fn new(p: P) -> State<T, P> {
+    unsafe fn new(p: P) -> State<T, P> {
         let stub = Node::new(None);
         State {
             head: AtomicPtr::new(stub),
@@ -122,10 +141,6 @@
         if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
     }
-
-    unsafe fn is_empty(&mut self) -> bool {
-        return (*self.tail).next.load(Acquire).is_null();
-    }
 }
 
 #[unsafe_destructor]
@@ -143,27 +158,42 @@ impl<T: Send, P: Send> Drop for State<T, P> {
 }
 
 impl<T: Send, P: Send> Producer<T, P> {
+    /// Pushes a new value onto this queue.
     pub fn push(&mut self, value: T) {
         unsafe { (*self.state.get()).push(value) }
     }
-    pub fn is_empty(&self) -> bool {
-        unsafe{ (*self.state.get()).is_empty() }
-    }
+    /// Gets an unsafe pointer to the user-defined packet shared by the
+    /// producers and the consumer. Note that care must be taken to ensure that
+    /// the lifetime of the queue outlives the usage of the returned pointer.
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
 }
 
 impl<T: Send, P: Send> Consumer<T, P> {
+    /// Pops some data from this queue.
+    ///
+    /// Note that the current implementation means that this function cannot
+    /// return `Option<T>`. It is possible for this queue to be in an
+    /// inconsistent state where many pushes have succeeded and completely
+    /// finished, but pops cannot return `Some(t)`. This inconsistent state
+    /// happens when a pusher is pre-empted at an inopportune moment.
+    ///
+    /// This inconsistent state means that this queue does indeed have data,
+    /// but it does not currently have access to it at this time.
     pub fn pop(&mut self) -> PopResult<T> {
         unsafe { (*self.state.get()).pop() }
    }
 
+    /// Attempts to pop data from this queue, but doesn't attempt too hard.
+    /// This will canonicalize inconsistent states to a `None` value.
     pub fn casual_pop(&mut self) -> Option<T> {
         match self.pop() {
             Data(t) => Some(t),
             Empty | Inconsistent => None,
         }
     }
+
+    /// Gets an unsafe pointer to the underlying user-defined packet. See
+    /// `Producer.packet` for more information.
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
@@ -173,8 +203,8 @@ impl<T: Send, P: Send> Consumer<T, P> {
 mod tests {
     use prelude::*;
-    use task;
     use super::{queue, Data, Empty, Inconsistent};
+    use native;
 
     #[test]
     fn test_full() {
@@ -192,14 +222,17 @@
             Empty => {}
             Inconsistent | Data(..) => fail!()
         }
+        let (port, chan) = SharedChan::new();
 
         for _ in range(0, nthreads) {
             let q = p.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            let chan = chan.clone();
+            do native::task::spawn {
                 let mut q = q;
                 for i in range(0, nmsgs) {
                     q.push(i);
                 }
+                chan.send(());
             }
         }
 
@@ -210,6 +243,9 @@
                 Data(_) => { i += 1 }
             }
         }
+        for _ in range(0, nthreads) {
+            port.recv();
+        }
     }
 }
diff --git a/src/libstd/rt/spsc_queue.rs b/src/libstd/sync/spsc_queue.rs
similarity index 81%
rename from src/libstd/rt/spsc_queue.rs
rename to src/libstd/sync/spsc_queue.rs
index f14533d726a78..6f1b887c27156 100644
--- a/src/libstd/rt/spsc_queue.rs
+++ b/src/libstd/sync/spsc_queue.rs
@@ -26,12 +26,20 @@
  */
 
 // http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
+
+//! A single-producer single-consumer concurrent queue
+//!
+//! This module contains the implementation of an SPSC queue which can be used
+//! concurrently between two tasks. This data structure is safe to use and
+//! enforces the semantics that there is one pusher and one popper.
+
 use cast;
 use kinds::Send;
 use ops::Drop;
 use option::{Some, None, Option};
-use unstable::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
-use unstable::sync::UnsafeArc;
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
@@ -64,14 +72,34 @@ struct State<T, P> {
     packet: P,
 }
 
+/// Producer half of this queue. This handle is used to push data to the
+/// consumer.
 pub struct Producer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// Consumer half of this queue. This handle is used to receive data from the
+/// producer.
 pub struct Consumer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// Creates a new queue. The producer returned is connected to the consumer to
+/// push all data to the consumer.
+///
+/// # Arguments
+///
+///   * `bound` - This queue is implemented with a linked list, which means
+///               that a push is always a malloc. In order to amortize this
+///               cost, an internal cache of nodes is maintained to prevent a
+///               malloc from always being necessary. This bound is the limit
+///               on the size of the cache (if desired). If the value is 0,
+///               then the cache has no bound. Otherwise, the cache will never
+///               grow larger than `bound` (although the queue itself could be
+///               much larger).
+///
+///   * `p` - This is the user-defined packet of data which will also be shared
+///           between the producer and consumer.
 pub fn queue<T: Send, P: Send>(bound: uint,
                                p: P) -> (Consumer<T, P>, Producer<T, P>) {
@@ -105,21 +133,31 @@ impl<T: Send> Node<T> {
 }
 
 impl<T: Send, P: Send> Producer<T, P> {
+    /// Pushes data onto the queue
     pub fn push(&mut self, t: T) {
         unsafe { (*self.state.get()).push(t) }
     }
+    /// Tests whether the queue is empty. Note that if this function returns
+    /// `false`, the return value is significant, but if the return value is
+    /// `true` then almost no meaning can be attached to the return value.
     pub fn is_empty(&self) -> bool {
         unsafe { (*self.state.get()).is_empty() }
     }
+    /// Acquires an unsafe pointer to the underlying user-defined packet. Note
Note + /// that care must be taken to ensure that the queue outlives the usage of + /// the packet (because it is an unsafe pointer). pub unsafe fn packet(&self) -> *mut P { &mut (*self.state.get()).packet as *mut P } } impl Consumer { + /// Pops some data from this queue, returning `None` when the queue is + /// empty. pub fn pop(&mut self) -> Option { unsafe { (*self.state.get()).pop() } } + /// Same function as the producer's `packet` method. pub unsafe fn packet(&self) -> *mut P { &mut (*self.state.get()).packet as *mut P } @@ -230,7 +268,7 @@ impl Drop for State { mod test { use prelude::*; use super::queue; - use task; + use native; #[test] fn smoke() { @@ -276,7 +314,8 @@ mod test { fn stress_bound(bound: uint) { let (c, mut p) = queue(bound, ()); - do task::spawn_sched(task::SingleThreaded) { + let (port, chan) = Chan::new(); + do native::task::spawn { let mut c = c; for _ in range(0, 100000) { loop { @@ -287,10 +326,12 @@ mod test { } } } + chan.send(()); } for _ in range(0, 100000) { p.push(1); } + port.recv(); } } } diff --git a/src/libstd/task/mod.rs b/src/libstd/task.rs similarity index 74% rename from src/libstd/task/mod.rs rename to src/libstd/task.rs index 3310dddc3276a..2f0f9bf64af4c 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task.rs @@ -53,22 +53,22 @@ #[allow(missing_doc)]; -use prelude::*; - +use any::Any; use comm::{Chan, Port}; +use kinds::Send; +use option::{None, Some, Option}; use result::{Result, Ok, Err}; -use rt::in_green_task_context; use rt::local::Local; +use rt::task::Task; use send_str::{SendStr, IntoSendStr}; +use str::Str; use util; -#[cfg(test)] use any::Any; +#[cfg(test)] use any::{AnyOwnExt, AnyRefExt}; #[cfg(test)] use comm::SharedChan; #[cfg(test)] use ptr; #[cfg(test)] use result; -pub mod spawn; - /// Indicates the manner in which a task exited. /// /// A task that completes without failing is considered to exit successfully. @@ -80,27 +80,6 @@ pub mod spawn; /// children tasks complete, recommend using a result future. pub type TaskResult = Result<(), ~Any>; -/// Scheduler modes -#[deriving(Eq)] -pub enum SchedMode { - /// Run task on the default scheduler - DefaultScheduler, - /// All tasks run in the same OS thread - SingleThreaded, -} - -/** - * Scheduler configuration options - * - * # Fields - * - * * sched_mode - The operating mode of the scheduler - * - */ -pub struct SchedOpts { - priv mode: SchedMode, -} - /** * Task configuration options * @@ -121,10 +100,9 @@ pub struct SchedOpts { * scheduler other tasks will be impeded or even blocked indefinitely. */ pub struct TaskOpts { - priv watched: bool, - priv notify_chan: Option>, + watched: bool, + notify_chan: Option>, name: Option, - sched: SchedOpts, stack_size: Option } @@ -153,7 +131,7 @@ pub struct TaskBuilder { */ pub fn task() -> TaskBuilder { TaskBuilder { - opts: default_task_opts(), + opts: TaskOpts::new(), gen_body: None, can_not_copy: None, } @@ -169,7 +147,6 @@ impl TaskBuilder { watched: self.opts.watched, notify_chan: notify_chan, name: name, - sched: self.opts.sched, stack_size: self.opts.stack_size }, gen_body: gen_body, @@ -229,11 +206,6 @@ impl TaskBuilder { self.opts.name = Some(name.into_send_str()); } - /// Configure a custom scheduler mode for the task. - pub fn sched_mode(&mut self, mode: SchedMode) { - self.opts.sched.mode = mode; - } - /** * Add a wrapper to the body of the spawned task. 
* @@ -285,7 +257,6 @@ impl TaskBuilder { watched: x.opts.watched, notify_chan: notify_chan, name: name, - sched: x.opts.sched, stack_size: x.opts.stack_size }; let f = match gen_body { @@ -296,7 +267,9 @@ impl TaskBuilder { f } }; - spawn::spawn_raw(opts, f); + + let t: ~Task = Local::take(); + t.spawn_sibling(opts, f); } /** @@ -328,25 +301,23 @@ impl TaskBuilder { } } - /* Task construction */ -pub fn default_task_opts() -> TaskOpts { - /*! - * The default task options - * - * By default all tasks are supervised by their parent, are spawned - * into the same scheduler, and do not post lifecycle notifications. - */ - - TaskOpts { - watched: true, - notify_chan: None, - name: None, - sched: SchedOpts { - mode: DefaultScheduler, - }, - stack_size: None +impl TaskOpts { + pub fn new() -> TaskOpts { + /*! + * The default task options + * + * By default all tasks are supervised by their parent, are spawned + * into the same scheduler, and do not post lifecycle notifications. + */ + + TaskOpts { + watched: true, + notify_chan: None, + name: None, + stack_size: None + } } } @@ -363,24 +334,6 @@ pub fn spawn(f: proc()) { task.spawn(f) } -pub fn spawn_sched(mode: SchedMode, f: proc()) { - /*! - * Creates a new task on a new or existing scheduler. - * - * When there are no more tasks to execute the - * scheduler terminates. - * - * # Failure - * - * In manual threads mode the number of threads requested must be - * greater than zero. - */ - - let mut task = task(); - task.sched_mode(mode); - task.spawn(f) -} - pub fn try(f: proc() -> T) -> Result { /*! * Execute a function in another task and return either the return value @@ -400,14 +353,10 @@ pub fn try(f: proc() -> T) -> Result { pub fn with_task_name(blk: |Option<&str>| -> U) -> U { use rt::task::Task; - if in_green_task_context() { - let mut task = Local::borrow(None::); - match task.get().name { - Some(ref name) => blk(Some(name.as_slice())), - None => blk(None) - } - } else { - fail!("no task name exists in non-green task context") + let mut task = Local::borrow(None::); + match task.get().name { + Some(ref name) => blk(Some(name.as_slice())), + None => blk(None) } } @@ -415,11 +364,10 @@ pub fn deschedule() { //! Yield control to the task scheduler use rt::local::Local; - use rt::sched::Scheduler; // FIXME(#7544): Optimize this, since we know we won't block. 
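Taken together, these hunks shrink the public task API to scheduler-agnostic pieces; a sketch of what survives (illustrative values, not from the patch):

```rust
use std::task;

fn main() {
    // Plain spawn; the SchedMode/spawn_sched knobs are gone.
    task::spawn(proc() {
        task::with_task_name(|name| assert!(name.is_none()));
    });

    // try() runs the proc in a new task and reports failure as a Result.
    match task::try(proc() 42) {
        Ok(n) => assert_eq!(n, 42),
        Err(..) => fail!("task failed"),
    }
}
```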
- let sched: ~Scheduler = Local::take(); - sched.yield_now(); + let task: ~Task = Local::take(); + task.yield_now(); } pub fn failing() -> bool { @@ -428,7 +376,7 @@ pub fn failing() -> bool { use rt::task::Task; let mut local = Local::borrow(None::); - local.get().unwinder.unwinding + local.get().unwinder.unwinding() } // The following 8 tests test the following 2^3 combinations: @@ -439,59 +387,43 @@ pub fn failing() -> bool { #[test] fn test_unnamed_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - do spawn { - with_task_name(|name| { - assert!(name.is_none()); - }) - } + do spawn { + with_task_name(|name| { + assert!(name.is_none()); + }) } } #[test] fn test_owned_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name(~"ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name(~"ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } #[test] fn test_static_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name("ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } #[test] fn test_send_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace".into_send_str()); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name("ada lovelace".into_send_str()); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } @@ -562,28 +494,19 @@ fn test_try_fail() { } } -#[cfg(test)] -fn get_sched_id() -> int { - use rt::sched::Scheduler; - let mut sched = Local::borrow(None::); - sched.get().sched_id() as int -} - #[test] fn test_spawn_sched() { + use clone::Clone; + let (po, ch) = SharedChan::new(); fn f(i: int, ch: SharedChan<()>) { - let parent_sched_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { - let child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - + let ch = ch.clone(); + do spawn { if (i == 0) { ch.send(()); } else { - f(i - 1, ch.clone()); + f(i - 1, ch); } }; @@ -596,16 +519,9 @@ fn test_spawn_sched() { fn test_spawn_sched_childs_on_default_sched() { let (po, ch) = Chan::new(); - // Assuming tests run on the default scheduler - let default_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { + do spawn { let ch = ch; - let parent_sched_id = get_sched_id(); do spawn { - let child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - assert_eq!(child_sched_id, default_id); ch.send(()); }; }; @@ -613,65 +529,6 @@ fn test_spawn_sched_childs_on_default_sched() { po.recv(); } -#[test] -fn test_spawn_sched_blocking() { - use unstable::mutex::Mutex; - - unsafe { - - // Testing that a task in one scheduler can block in foreign code - // without affecting other schedulers - 20u.times(|| { - let (start_po, start_ch) = Chan::new(); - let (fin_po, fin_ch) = Chan::new(); - - let mut lock = Mutex::new(); - let lock2 = lock.clone(); - - do spawn_sched(SingleThreaded) { - let mut lock = lock2; - lock.lock(); - - start_ch.send(()); - - // Block the scheduler thread - lock.wait(); - lock.unlock(); - - fin_ch.send(()); - }; - - // Wait until the 
other task has its lock - start_po.recv(); - - fn pingpong(po: &Port, ch: &Chan) { - let mut val = 20; - while val > 0 { - val = po.recv(); - ch.try_send(val - 1); - } - } - - let (setup_po, setup_ch) = Chan::new(); - let (parent_po, parent_ch) = Chan::new(); - do spawn { - let (child_po, child_ch) = Chan::new(); - setup_ch.send(child_ch); - pingpong(&child_po, &parent_ch); - }; - - let child_ch = setup_po.recv(); - child_ch.send(20); - pingpong(&parent_po, &child_ch); - lock.lock(); - lock.signal(); - lock.unlock(); - fin_po.recv(); - lock.destroy(); - }) - } -} - #[cfg(test)] fn avoid_copying_the_body(spawnfn: |v: proc()|) { let (p, ch) = Chan::::new(); @@ -735,11 +592,7 @@ fn test_child_doesnt_ref_parent() { #[test] fn test_simple_newsched_spawn() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - spawn(proc()()) - } + spawn(proc()()) } #[test] diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs deleted file mode 100644 index 1148774020a14..0000000000000 --- a/src/libstd/task/spawn.rs +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*!************************************************************************** - * - * WARNING: linked failure has been removed since this doc comment was written, - * but it was so pretty that I didn't want to remove it. - * - * Spawning & linked failure - * - * Several data structures are involved in task management to allow properly - * propagating failure across linked/supervised tasks. - * - * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of - * all tasks that are part of the group. Some tasks are 'members', which - * means if they fail, they will kill everybody else in the taskgroup. - * Other tasks are 'descendants', which means they will not kill tasks - * from this group, but can be killed by failing members. - * - * A new one of these is created each spawn_linked or spawn_supervised. - * - * (2) The "taskgroup" is a per-task control structure that tracks a task's - * spawn configuration. It contains a reference to its taskgroup_arc, a - * reference to its node in the ancestor list (below), and an optionally - * configured notification port. These are stored in TLS. - * - * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which - * tracks 'generations' of taskgroups -- a group's ancestors are groups - * which (directly or transitively) spawn_supervised-ed them. Each task - * is recorded in the 'descendants' of each of its ancestor groups. - * - * Spawning a supervised task is O(n) in the number of generations still - * alive, and exiting (by success or failure) that task is also O(n). - * - * This diagram depicts the references between these data structures: - * - * linked_________________________________ - * ___/ _________ \___ - * / \ | group X | / \ - * ( A ) - - - - - - - > | {A,B} {}|< - - -( B ) - * \___/ |_________| \___/ - * unlinked - * | __ (nil) - * | //| The following code causes this: - * |__ // /\ _________ - * / \ // || | group Y | fn taskA() { - * ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB); - * \___/ / \=====> |_________| spawn_unlinked(taskC); - * supervise /gen \ ... - * | __ \ 00 / } - * | //| \__/ fn taskB() { ... 
} - * |__ // /\ _________ fn taskC() { - * / \/ || | group Z | spawn_supervised(taskD); - * ( D )- - - ||- - - > | {D} {E} | ... - * \___/ / \=====> |_________| } - * supervise /gen \ fn taskD() { - * | __ \ 01 / spawn_supervised(taskE); - * | //| \__/ ... - * |__ // _________ } - * / \/ | group W | fn taskE() { ... } - * ( E )- - - - - - - > | {E} {} | - * \___/ |_________| - * - * "tcb" "taskgroup_arc" - * "ancestor_list" - * - ****************************************************************************/ - -#[doc(hidden)]; - -use prelude::*; - -use comm::Chan; -use rt::local::Local; -use rt::sched::{Scheduler, Shutdown, TaskFromFriend}; -use rt::task::{Task, Sched}; -use rt::thread::Thread; -use rt::{in_green_task_context, new_event_loop}; -use task::{SingleThreaded, TaskOpts, TaskResult}; - -#[cfg(test)] use task::default_task_opts; -#[cfg(test)] use task; - -pub fn spawn_raw(mut opts: TaskOpts, f: proc()) { - assert!(in_green_task_context()); - - let mut task = if opts.sched.mode != SingleThreaded { - if opts.watched { - Task::build_child(opts.stack_size, f) - } else { - Task::build_root(opts.stack_size, f) - } - } else { - unsafe { - // Creating a 1:1 task:thread ... - let sched: *mut Scheduler = Local::unsafe_borrow(); - let sched_handle = (*sched).make_handle(); - - // Since this is a 1:1 scheduler we create a queue not in - // the stealee set. The run_anything flag is set false - // which will disable stealing. - let (worker, _stealer) = (*sched).work_queue.pool().deque(); - - // Create a new scheduler to hold the new task - let mut new_sched = ~Scheduler::new_special(new_event_loop(), - worker, - (*sched).work_queues.clone(), - (*sched).sleeper_list.clone(), - false, - Some(sched_handle)); - let mut new_sched_handle = new_sched.make_handle(); - - // Allow the scheduler to exit when the pinned task exits - new_sched_handle.send(Shutdown); - - // Pin the new task to the new scheduler - let new_task = if opts.watched { - Task::build_homed_child(opts.stack_size, f, Sched(new_sched_handle)) - } else { - Task::build_homed_root(opts.stack_size, f, Sched(new_sched_handle)) - }; - - // Create a task that will later be used to join with the new scheduler - // thread when it is ready to terminate - let (thread_port, thread_chan) = Chan::new(); - let join_task = do Task::build_child(None) { - debug!("running join task"); - let thread: Thread<()> = thread_port.recv(); - thread.join(); - }; - - // Put the scheduler into another thread - let orig_sched_handle = (*sched).make_handle(); - - let new_sched = new_sched; - let thread = do Thread::start { - let mut new_sched = new_sched; - let mut orig_sched_handle = orig_sched_handle; - - let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool, None) || { - debug!("boostrapping a 1:1 scheduler"); - }; - new_sched.bootstrap(bootstrap_task); - - // Now tell the original scheduler to join with this thread - // by scheduling a thread-joining task on the original scheduler - orig_sched_handle.send(TaskFromFriend(join_task)); - - // NB: We can't simply send a message from here to another task - // because this code isn't running in a task and message passing doesn't - // work outside of tasks. Hence we're sending a scheduler message - // to execute a new task directly to a scheduler. 
- }; - - // Give the thread handle to the join task - thread_chan.send(thread); - - // When this task is enqueued on the current scheduler it will then get - // forwarded to the scheduler to which it is pinned - new_task - } - }; - - if opts.notify_chan.is_some() { - let notify_chan = opts.notify_chan.take_unwrap(); - let on_exit: proc(TaskResult) = proc(task_result) { - notify_chan.try_send(task_result); - }; - task.death.on_exit = Some(on_exit); - } - - task.name = opts.name.take(); - debug!("spawn calling run_task"); - Scheduler::run_task(task); - -} - -#[test] -fn test_spawn_raw_simple() { - let (po, ch) = Chan::new(); - do spawn_raw(default_task_opts()) { - ch.send(()); - } - po.recv(); -} - -#[test] -fn test_spawn_raw_unsupervise() { - let opts = task::TaskOpts { - watched: false, - notify_chan: None, - .. default_task_opts() - }; - do spawn_raw(opts) { - fail!(); - } -} - -#[test] -fn test_spawn_raw_notify_success() { - let (notify_po, notify_ch) = Chan::new(); - - let opts = task::TaskOpts { - notify_chan: Some(notify_ch), - .. default_task_opts() - }; - do spawn_raw(opts) { - } - assert!(notify_po.recv().is_ok()); -} - -#[test] -fn test_spawn_raw_notify_failure() { - // New bindings for these - let (notify_po, notify_ch) = Chan::new(); - - let opts = task::TaskOpts { - watched: false, - notify_chan: Some(notify_ch), - .. default_task_opts() - }; - do spawn_raw(opts) { - fail!(); - } - assert!(notify_po.recv().is_err()); -} diff --git a/src/libstd/unstable/dynamic_lib.rs b/src/libstd/unstable/dynamic_lib.rs index 03b25fbd0442b..0569fe32c58b3 100644 --- a/src/libstd/unstable/dynamic_lib.rs +++ b/src/libstd/unstable/dynamic_lib.rs @@ -140,7 +140,6 @@ pub mod dl { use path; use ptr; use str; - use unstable::sync::atomic; use result::*; pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void { @@ -158,11 +157,7 @@ pub mod dl { static mut lock: Mutex = MUTEX_INIT; unsafe { // dlerror isn't thread safe, so we need to lock around this entire - // sequence. `atomic` asserts that we don't do anything that - // would cause this task to be descheduled, which could deadlock - // the scheduler if it happens while the lock is held. - // FIXME #9105 use a Rust mutex instead of C++ mutexes. - let _guard = atomic(); + // sequence lock.lock(); let _old_error = dlerror(); @@ -208,7 +203,6 @@ pub mod dl { use libc; use path; use ptr; - use unstable::sync::atomic; use result::*; pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void { @@ -225,7 +219,6 @@ pub mod dl { pub fn check_for_errors_in(f: || -> T) -> Result { unsafe { - let _guard = atomic(); SetLastError(0); let result = f(); diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index 06f9ba65ae773..e7e8cec9d5f17 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -11,15 +11,13 @@ //! Runtime calls emitted by the compiler. use c_str::ToCStr; -use cast::transmute; use libc::{c_char, size_t, uintptr_t}; -use rt::task; use rt::borrowck; #[cold] #[lang="fail_"] pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! 
{ - task::begin_unwind_raw(expr, file, line); + ::rt::begin_unwind_raw(expr, file, line); } #[cold] @@ -81,15 +79,3 @@ pub unsafe fn check_not_borrowed(a: *u8, file: *c_char, line: size_t) { borrowck::check_not_borrowed(a, file, line) } - -#[lang="start"] -pub fn start(main: *u8, argc: int, argv: **c_char) -> int { - use rt; - - unsafe { - return do rt::start(argc, argv as **u8) { - let main: extern "Rust" fn() = transmute(main); - main(); - }; - } -} diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs index 043d99eb1b82b..f4573785996c4 100644 --- a/src/libstd/unstable/mod.rs +++ b/src/libstd/unstable/mod.rs @@ -22,8 +22,8 @@ pub mod simd; pub mod lang; pub mod sync; pub mod mutex; -pub mod atomics; pub mod raw; +pub mod stack; /** diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs index 3e7a861b38501..5b2fac8e74e2e 100644 --- a/src/libstd/unstable/mutex.rs +++ b/src/libstd/unstable/mutex.rs @@ -48,7 +48,7 @@ #[allow(non_camel_case_types)]; use libc::c_void; -use unstable::atomics; +use sync::atomics; pub struct Mutex { // pointers for the lock/cond handles, atomically updated @@ -333,12 +333,12 @@ mod test { fn somke_cond() { static mut lock: Mutex = MUTEX_INIT; unsafe { + lock.lock(); let t = do Thread::start { lock.lock(); lock.signal(); lock.unlock(); }; - lock.lock(); lock.wait(); lock.unlock(); t.join(); diff --git a/src/libstd/unstable/stack.rs b/src/libstd/unstable/stack.rs new file mode 100644 index 0000000000000..d6cd690eaa9db --- /dev/null +++ b/src/libstd/unstable/stack.rs @@ -0,0 +1,275 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Rust stack-limit management +//! +//! Currently Rust uses a segmented-stack-like scheme in order to detect stack +//! overflow for rust tasks. In this scheme, every function's prologue begins +//! with a check to see whether the current stack limits are being +//! exceeded. +//! +//! This module provides the functionality necessary in order to manage these +//! stack limits (which are stored in platform-specific locations). The +//! functions here are used at the borders of the task lifetime in order to +//! manage these limits. +//! +//! This is an unstable module because this scheme for stack overflow +//! detection is not guaranteed to continue in the future. Usage of this module +//! is discouraged unless absolutely necessary. + +static RED_ZONE: uint = 20 * 1024; + +/// This function is invoked from rust's current __morestack function. Segmented +/// stacks are currently not enabled as true segmented stacks; each task instead +/// gets one giant stack segment. This means that whenever we run out of stack, +/// we truly consider it to be stack overflow rather than allocating a new stack. +#[no_mangle] // - this is called from C code +#[no_split_stack] // - it would be sad for this function to trigger __morestack +#[doc(hidden)] // - Function must be `pub` to get exported, but it's + // irrelevant for documentation purposes.
+#[cfg(not(test))] // in testing, use the original libstd's version +pub extern "C" fn rust_stack_exhausted() { + use rt::task::Task; + use option::None; + use rt::local::Local; + use unstable::intrinsics; + + unsafe { + // We're calling this function because the stack just ran out. We need + // to call some other rust functions, but if we invoke the functions + // right now it'll just trigger this handler being called again. In + // order to alleviate this, we move the stack limit to be inside of the + // red zone that was allocated for exactly this reason. + let limit = get_sp_limit(); + record_sp_limit(limit - RED_ZONE / 2); + + // This probably isn't the best course of action. Ideally one would want + // to unwind the stack here instead of just aborting the entire process. + // This is a tricky problem, however. There are a few things which need to + // be considered: + // + // 1. We're here because of a stack overflow, yet unwinding will run + // destructors and hence arbitrary code. What if that code overflows + // the stack? One possibility is to use the above allocation of an + // extra 10k to hope that we don't hit the limit, and if we do then + // abort the whole program. Not the best, but kind of hard to deal + // with unless we want to switch stacks. + // + // 2. LLVM will optimize functions based on whether they can unwind or + // not. It will flag functions with 'nounwind' if it believes that + // the function cannot trigger unwinding, but if we do unwind on + // stack overflow then it means that we could unwind in any function + // anywhere. We would have to make sure that LLVM only places the + // nounwind flag on functions which don't call any other functions. + // + // 3. The function that overflowed may have owned arguments. These + // arguments need to have their destructors run, but we haven't even + // begun executing the function yet, so unwinding will not run + // any landing pads for these functions. If this is ignored, then + // the arguments will just be leaked. + // + // Exactly what to do here is a very delicate topic, and is possibly + // still up in the air. Some relevant issues: + // + // #3555 - out-of-stack failure leaks arguments + // #3695 - should there be a stack limit? + // #9855 - possible strategies which could be taken + // #9854 - unwinding on windows through __morestack has never worked + // #2361 - possible implementation of not using landing pads + + let mut task = Local::borrow(None::<Task>); + let n = task.get().name.as_ref() + .map(|n| n.as_slice()).unwrap_or("<unnamed>"); + + // See the message below for why this is not emitted to the + // task's logger. This has the additional conundrum that the + // logger may not be initialized just yet, meaning that an FFI + // call would be needed to initialize it (calling out to libuv), + // and that FFI call needs 2MB of stack when we just ran out. + println!("task '{}' has overflowed its stack", n); + + intrinsics::abort(); + } +} + +#[inline(always)] +pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) { + // When the old runtime had segmented stacks, it used a calculation that was + // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic + // symbol resolution, llvm function calls, etc. In theory this red zone + // value is 0, but it matters far less when we have gigantic stacks because + // we don't need to be so exact about our stack budget. The "fudge factor" + // was because LLVM doesn't emit a stack check for functions < 256 bytes in + // size.
Again though, we have giant stacks, so we round all these + // calculations up to the nice round number of 20k. + record_sp_limit(stack_lo + RED_ZONE); + + return target_record_stack_bounds(stack_lo, stack_hi); + + #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)] + unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {} + #[cfg(windows, target_arch = "x86_64")] #[inline(always)] + unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) { + // Windows compiles C functions which may check the stack bounds. This + // means that if we want to perform valid FFI on windows, then we need + // to ensure that the stack bounds are what they truly are for this + // task. More info can be found at: + // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839 + // + // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom) + asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile"); + asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile"); + } +} + +/// Records the current limit of the stack as specified by `limit`. +/// +/// This is stored in an OS-dependent location, likely inside of the thread +/// local storage. The location where the limit is stored is pre-ordained +/// because it's where LLVM has emitted code to check. +/// +/// Note that this cannot be called under normal circumstances: this function +/// changes the stack limit, so upon returning any further function calls may +/// trigger the morestack logic if you're not careful. +/// +/// Also note that this function and all of the inner functions are flagged +/// "inline(always)" because they're messing around with the stack limits. It +/// would be unfortunate for the functions themselves to trigger a morestack +/// invocation (if they were actual function calls).
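For orientation, a minimal sketch of how a freshly created task might drive record_stack_bounds; the entry-point name and its proc argument are hypothetical illustrations, not code from this patch:

unsafe fn task_entry_sketch(stack_lo: uint, stack_hi: uint, f: proc()) {
    // Publish this task's stack bounds before running any user code. After
    // this call, an LLVM prologue check fails over to rust_stack_exhausted
    // once fewer than RED_ZONE bytes of headroom remain.
    record_stack_bounds(stack_lo, stack_hi);
    f();
}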
+#[inline(always)] +pub unsafe fn record_sp_limit(limit: uint) { + return target_record_sp_limit(limit); + + // x86-64 + #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $$0x60+90*8, %rsi + movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block + // store this inside of the "arbitrary data slot", but double the size + // because this is 64 bit instead of 32 bit + asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile") + } + + // x86 + #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movl $$0x48+90*4, %eax + movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile") + } + #[cfg(target_arch = "x86", target_os = "linux")] + #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block + // store this inside of the "arbitrary data slot" + asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile") + } + + // mips, arm - Some brave soul can port these to inline asm, but it's over + // my head personally + #[cfg(target_arch = "mips")] + #[cfg(target_arch = "arm")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + use libc::c_void; + return record_sp_limit(limit as *c_void); + extern { + fn record_sp_limit(limit: *c_void); + } + } +} + +/// The counterpart of the function above, this function will fetch the current +/// stack limit stored in TLS. +/// +/// Note that all of these functions are meant to be exact counterparts of their +/// brethren above, except that the operands are reversed. +/// +/// As with the setter, this function does not have a __morestack header and can +/// therefore be called in a "we're out of stack" situation. 
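A companion sketch for the setter/getter pair: a scheduler switching between two tasks could save the outgoing limit and install the incoming one. StackInfo and switch_limits_sketch are assumed names for illustration; only get_sp_limit and record_sp_limit come from this patch:

struct StackInfo { lo: uint, hi: uint, limit: uint } // assumed bookkeeping type

unsafe fn switch_limits_sketch(out: &mut StackInfo, inn: &StackInfo) {
    // Save the outgoing task's limit, then install the incoming task's, so
    // that subsequent prologue checks compare against the correct stack.
    out.limit = get_sp_limit();
    record_sp_limit(inn.limit);
}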
+#[inline(always)] +pub unsafe fn get_sp_limit() -> uint { + return target_get_sp_limit(); + + // x86-64 + #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq $$0x60+90*8, %rsi + movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + + // x86 + #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl $$0x48+90*4, %eax + movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile"); + return limit; + } + #[cfg(target_arch = "x86", target_os = "linux")] + #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + + // mips, arm - Some brave soul can port these to inline asm, but it's over + // my head personally + #[cfg(target_arch = "mips")] + #[cfg(target_arch = "arm")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + use libc::c_void; + return get_sp_limit() as uint; + extern { + fn get_sp_limit() -> *c_void; + } + } +} diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs index 50fae1e0239a6..687efea939b52 100644 --- a/src/libstd/unstable/sync.rs +++ b/src/libstd/unstable/sync.rs @@ -8,353 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use cast; -use comm::{Chan, Port}; -use ptr; -use option::{Option,Some,None}; -use task; -use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst}; -use unstable::mutex::Mutex; -use ops::Drop; use clone::Clone; use kinds::Send; -use vec; - -/// An atomically reference counted pointer. -/// -/// Enforces no shared-memory safety. -//#[unsafe_no_drop_flag] FIXME: #9758 -pub struct UnsafeArc { - data: *mut ArcData, -} - -pub enum UnsafeArcUnwrap { - UnsafeArcSelf(UnsafeArc), - UnsafeArcT(T) -} - -#[cfg(test)] -impl UnsafeArcUnwrap { - fn expect_t(self, msg: &'static str) -> T { - match self { - UnsafeArcSelf(_) => fail!(msg), - UnsafeArcT(t) => t - } - } - - fn is_self(&self) -> bool { - match *self { - UnsafeArcSelf(_) => true, - UnsafeArcT(_) => false - } - } -} - -struct ArcData { - count: AtomicUint, - // An unwrapper uses this protocol to communicate with the "other" task that - // drops the last refcount on an arc. Unfortunately this can't be a proper - // pipe protocol because the unwrapper has to access both stages at once. - // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)? 
- unwrapper: AtomicOption<(Chan<()>, Port)>, - // FIXME(#3224) should be able to make this non-option to save memory - data: Option, -} - -unsafe fn new_inner(data: T, refcount: uint) -> *mut ArcData { - let data = ~ArcData { count: AtomicUint::new(refcount), - unwrapper: AtomicOption::empty(), - data: Some(data) }; - cast::transmute(data) -} - -/// A helper object used by `UnsafeArc::unwrap`. -struct ChannelAndDataGuard { - channel: Option>, - data: Option<~ArcData>, -} - -#[unsafe_destructor] -impl Drop for ChannelAndDataGuard { - fn drop(&mut self) { - if task::failing() { - // Killed during wait. Because this might happen while - // someone else still holds a reference, we can't free - // the data now; the "other" last refcount will free it. - unsafe { - let channel = self.channel.take_unwrap(); - let data = self.data.take_unwrap(); - channel.send(false); - cast::forget(data); - } - } - } -} - -impl ChannelAndDataGuard { - fn unwrap(mut self) -> (Chan, ~ArcData) { - (self.channel.take_unwrap(), self.data.take_unwrap()) - } -} - -impl UnsafeArc { - pub fn new(data: T) -> UnsafeArc { - unsafe { UnsafeArc { data: new_inner(data, 1) } } - } - - /// As new(), but returns an extra pre-cloned handle. - pub fn new2(data: T) -> (UnsafeArc, UnsafeArc) { - unsafe { - let ptr = new_inner(data, 2); - (UnsafeArc { data: ptr }, UnsafeArc { data: ptr }) - } - } - - /// As new(), but returns a vector of as many pre-cloned handles as requested. - pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc] { - unsafe { - if num_handles == 0 { - ~[] // need to free data here - } else { - let ptr = new_inner(data, num_handles); - vec::from_fn(num_handles, |_| UnsafeArc { data: ptr }) - } - } - } - - /// As newN(), but from an already-existing handle. Uses one xadd. - pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc] { - if num_handles == 0 { - ~[] // The "num_handles - 1" trick (below) fails in the 0 case. - } else { - unsafe { - // Minus one because we are recycling the given handle's refcount. - let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire); - // let old_count = (*self.data).count.fetch_add(num_handles, Acquire); - assert!(old_count >= 1); - let ptr = self.data; - cast::forget(self); // Don't run the destructor on this handle. - vec::from_fn(num_handles, |_| UnsafeArc { data: ptr }) - } - } - } - - #[inline] - pub fn get(&self) -> *mut T { - unsafe { - assert!((*self.data).count.load(Relaxed) > 0); - let r: *mut T = (*self.data).data.get_mut_ref(); - return r; - } - } - - #[inline] - pub fn get_immut(&self) -> *T { - unsafe { - assert!((*self.data).count.load(Relaxed) > 0); - let r: *T = (*self.data).data.get_ref(); - return r; - } - } - - /// Wait until all other handles are dropped, then retrieve the enclosed - /// data. See extra::arc::Arc for specific semantics documentation. - /// If called when the task is already unkillable, unwrap will unkillably - /// block; otherwise, an unwrapping task can be killed by linked failure. - pub fn unwrap(self) -> T { - unsafe { - let mut this = self; - // The ~ dtor needs to run if this code succeeds. - let mut data: ~ArcData = cast::transmute(this.data); - // Set up the unwrap protocol. - let (p1,c1) = Chan::new(); // () - let (p2,c2) = Chan::new(); // bool - // Try to put our server end in the unwrapper slot. - // This needs no barrier -- it's protected by the release barrier on - // the xadd, and the acquire+release barrier in the destructor's xadd. - if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() { - // Got in. 
Tell this handle's destructor not to run (we are now it). - this.data = ptr::mut_null(); - // Drop our own reference. - let old_count = data.count.fetch_sub(1, Release); - assert!(old_count >= 1); - if old_count == 1 { - // We were the last owner. Can unwrap immediately. - // AtomicOption's destructor will free the server endpoint. - // FIXME(#3224): it should be like this - // let ~ArcData { data: user_data, _ } = data; - // user_data - data.data.take_unwrap() - } else { - // The *next* person who sees the refcount hit 0 will wake us. - let c2_and_data = ChannelAndDataGuard { - channel: Some(c2), - data: Some(data), - }; - p1.recv(); - // Got here. Back in the 'unkillable' without getting killed. - let (c2, data) = c2_and_data.unwrap(); - c2.send(true); - // FIXME(#3224): it should be like this - // let ~ArcData { data: user_data, _ } = data; - // user_data - let mut data = data; - data.data.take_unwrap() - } - } else { - // If 'put' returns the server end back to us, we were rejected; - // someone else was trying to unwrap. Avoid guaranteed deadlock. - cast::forget(data); - fail!("Another task is already unwrapping this Arc!"); - } - } - } - - /// As unwrap above, but without blocking. Returns 'UnsafeArcSelf(self)' if this is - /// not the last reference; 'UnsafeArcT(unwrapped_data)' if so. - pub fn try_unwrap(mut self) -> UnsafeArcUnwrap { - unsafe { - // The ~ dtor needs to run if this code succeeds. - let mut data: ~ArcData = cast::transmute(self.data); - // This can of course race with anybody else who has a handle, but in - // such a case, the returned count will always be at least 2. If we - // see 1, no race was possible. All that matters is 1 or not-1. - let count = data.count.load(Acquire); - assert!(count >= 1); - // The more interesting race is one with an unwrapper. They may have - // already dropped their count -- but if so, the unwrapper pointer - // will have been set first, which the barriers ensure we will see. - // (Note: using is_empty(), not take(), to not free the unwrapper.) - if count == 1 && data.unwrapper.is_empty(Acquire) { - // Tell this handle's destructor not to run (we are now it). - self.data = ptr::mut_null(); - // FIXME(#3224) as above - UnsafeArcT(data.data.take_unwrap()) - } else { - cast::forget(data); - UnsafeArcSelf(self) - } - } - } -} - -impl Clone for UnsafeArc { - fn clone(&self) -> UnsafeArc { - unsafe { - // This barrier might be unnecessary, but I'm not sure... - let old_count = (*self.data).count.fetch_add(1, Acquire); - assert!(old_count >= 1); - return UnsafeArc { data: self.data }; - } - } -} - -#[unsafe_destructor] -impl Drop for UnsafeArc{ - fn drop(&mut self) { - unsafe { - // Happens when destructing an unwrapper's handle and from `#[unsafe_no_drop_flag]` - if self.data.is_null() { - return - } - let mut data: ~ArcData = cast::transmute(self.data); - // Must be acquire+release, not just release, to make sure this - // doesn't get reordered to after the unwrapper pointer load. - let old_count = data.count.fetch_sub(1, SeqCst); - assert!(old_count >= 1); - if old_count == 1 { - // Were we really last, or should we hand off to an - // unwrapper? It's safe to not xchg because the unwrapper - // will set the unwrap lock *before* dropping his/her - // reference. In effect, being here means we're the only - // *awake* task with the data. - match data.unwrapper.take(Acquire) { - Some(~(message, response)) => { - // Send 'ready' and wait for a response. - message.send(()); - // Unkillable wait. Message guaranteed to come. 
- if response.recv() { - // Other task got the data. - cast::forget(data); - } else { - // Other task was killed. drop glue takes over. - } - } - None => { - // drop glue takes over. - } - } - } else { - cast::forget(data); - } - } - } -} - - -/****************************************************************************/ - -pub struct AtomicGuard { - on: bool, -} - -impl Drop for AtomicGuard { - fn drop(&mut self) { - use rt::task::{Task, GreenTask, SchedTask}; - use rt::local::Local; - - if self.on { - unsafe { - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(t) => { - match (*t).task_type { - GreenTask(_) => (*t).death.allow_deschedule(), - SchedTask => {} - } - } - None => {} - } - } - } - } -} - -/** - * Enables a runtime assertion that no operation while the returned guard is - * live uses scheduler operations (deschedule, recv, spawn, etc). This is for - * use with pthread mutexes, which may block the entire scheduler thread, - * rather than just one task, and is hence prone to deadlocks if mixed with - * descheduling. - * - * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section - * synchronization whatsoever. It only makes sense to use for CPU-local issues. - */ -// FIXME(#8140) should not be pub -pub unsafe fn atomic() -> AtomicGuard { - use rt::task::{Task, GreenTask, SchedTask}; - use rt::local::Local; - - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(t) => { - match (*t).task_type { - GreenTask(_) => { - (*t).death.inhibit_deschedule(); - return AtomicGuard { - on: true, - }; - } - SchedTask => {} - } - } - None => {} - } - - AtomicGuard { - on: false, - } -} +use ops::Drop; +use option::{Option,Some,None}; +use sync::arc::UnsafeArc; +use unstable::mutex::Mutex; pub struct LittleLock { priv l: Mutex, @@ -496,37 +155,14 @@ impl Exclusive { l.wait(); } } - - pub fn unwrap(self) -> T { - let Exclusive { x: x } = self; - // Someday we might need to unkillably unwrap an Exclusive, but not today. - let inner = x.unwrap(); - let ExData { data: user_data, .. } = inner; // will destroy the LittleLock - user_data - } } #[cfg(test)] mod tests { use option::*; use prelude::*; - use super::{Exclusive, UnsafeArc, atomic}; + use super::Exclusive; use task; - use mem::size_of; - - //#[unsafe_no_drop_flag] FIXME: #9758 - #[ignore] - #[test] - fn test_size() { - assert_eq!(size_of::>(), size_of::<*[int, ..10]>()); - } - - #[test] - fn test_atomic() { - // NB. The whole runtime will abort on an 'atomic-sleep' violation, - // so we can't really test for the converse behaviour. - unsafe { let _ = atomic(); } // oughtn't fail - } #[test] fn exclusive_new_arc() { @@ -570,114 +206,4 @@ mod tests { x.with(|one| assert_eq!(*one, 1)); } } - - #[test] - fn arclike_newN() { - // Tests that the many-refcounts-at-once constructors don't leak. - let _ = UnsafeArc::new2(~~"hello"); - let x = UnsafeArc::newN(~~"hello", 0); - assert_eq!(x.len(), 0) - let x = UnsafeArc::newN(~~"hello", 1); - assert_eq!(x.len(), 1) - let x = UnsafeArc::newN(~~"hello", 10); - assert_eq!(x.len(), 10) - } - - #[test] - fn arclike_cloneN() { - // Tests that the many-refcounts-at-once special-clone doesn't leak. 
- let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(0); - assert_eq!(x.len(), 0); - let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(1); - assert_eq!(x.len(), 1); - let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(10); - assert_eq!(x.len(), 10); - } - - #[test] - fn arclike_unwrap_basic() { - let x = UnsafeArc::new(~~"hello"); - assert!(x.unwrap() == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap() { - let x = UnsafeArc::new(~~"hello"); - assert!(x.try_unwrap().expect_t("try_unwrap failed") == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap_fail() { - let x = UnsafeArc::new(~~"hello"); - let x2 = x.clone(); - let left_x = x.try_unwrap(); - assert!(left_x.is_self()); - drop(left_x); - assert!(x2.try_unwrap().expect_t("try_unwrap none") == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap_unwrap_race() { - // When an unwrap and a try_unwrap race, the unwrapper should always win. - let x = UnsafeArc::new(~~"hello"); - let x2 = x.clone(); - let (p,c) = Chan::new(); - do task::spawn { - c.send(()); - assert!(x2.unwrap() == ~~"hello"); - c.send(()); - } - p.recv(); - task::deschedule(); // Try to make the unwrapper get blocked first. - let left_x = x.try_unwrap(); - assert!(left_x.is_self()); - drop(left_x); - p.recv(); - } - - #[test] - fn exclusive_new_unwrap_basic() { - // Unlike the above, also tests no double-freeing of the LittleLock. - let x = Exclusive::new(~~"hello"); - assert!(x.unwrap() == ~~"hello"); - } - - #[test] - fn exclusive_new_unwrap_contended() { - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - do task::spawn { - unsafe { x2.with(|_hello| ()); } - task::deschedule(); - } - assert!(x.unwrap() == ~~"hello"); - - // Now try the same thing, but with the child task blocking. - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - let mut builder = task::task(); - let res = builder.future_result(); - do builder.spawn { - assert!(x2.unwrap() == ~~"hello"); - } - // Have to get rid of our reference before blocking. 
- drop(x); - res.recv(); - } - - #[test] #[should_fail] - fn exclusive_new_unwrap_conflict() { - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - let mut builder = task::task(); - let res = builder.future_result(); - do builder.spawn { - assert!(x2.unwrap() == ~~"hello"); - } - assert!(x.unwrap() == ~~"hello"); - assert!(res.recv().is_ok()); - } } diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 97d4c2f6d1b15..86f28c28f6977 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -2874,7 +2874,6 @@ impl Extendable for ~[A] { #[cfg(test)] mod tests { - use option::{None, Some}; use mem; use vec::*; use cmp::*; diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index 930d25e7443e7..aa7e0d0eced8d 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -606,7 +606,6 @@ impl AstBuilder for @ExtCtxt { ~[ self.ident_of("std"), self.ident_of("rt"), - self.ident_of("task"), self.ident_of("begin_unwind"), ], ~[ diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index a6e45c7e1bbb3..2c2669e914cca 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -740,10 +740,10 @@ pub fn std_macros() -> @str { fail!("explicit failure") ); ($msg:expr) => ( - ::std::rt::task::begin_unwind($msg, file!(), line!()) + ::std::rt::begin_unwind($msg, file!(), line!()) ); ($fmt:expr, $($arg:tt)*) => ( - ::std::rt::task::begin_unwind(format!($fmt, $($arg)*), file!(), line!()) + ::std::rt::begin_unwind(format!($fmt, $($arg)*), file!(), line!()) ) ) diff --git a/src/test/bench/rt-messaging-ping-pong.rs b/src/test/bench/rt-messaging-ping-pong.rs index 90d81aa7c3ee6..6eef71622c5c6 100644 --- a/src/test/bench/rt-messaging-ping-pong.rs +++ b/src/test/bench/rt-messaging-ping-pong.rs @@ -1,4 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -12,7 +11,6 @@ extern mod extra; use std::os; use std::uint; -use std::rt::test::spawntask_later; // This is a simple bench that creates M pairs of of tasks. These // tasks ping-pong back and forth over a pair of streams. This is a @@ -28,7 +26,7 @@ fn ping_pong_bench(n: uint, m: uint) { // Create a stream B->A let (pb,cb) = Chan::<()>::new(); - do spawntask_later() || { + do spawn() || { let chan = ca; let port = pb; n.times(|| { @@ -37,7 +35,7 @@ fn ping_pong_bench(n: uint, m: uint) { }) } - do spawntask_later() || { + do spawn() || { let chan = cb; let port = pa; n.times(|| { diff --git a/src/test/bench/rt-parfib.rs b/src/test/bench/rt-parfib.rs index ab607d9aebc75..6e3c42f2a4dea 100644 --- a/src/test/bench/rt-parfib.rs +++ b/src/test/bench/rt-parfib.rs @@ -12,7 +12,6 @@ extern mod extra; use std::os; use std::uint; -use std::rt::test::spawntask_later; // A simple implementation of parfib. One subtree is found in a new // task and communicated over a oneshot pipe, the other is found @@ -24,7 +23,7 @@ fn parfib(n: uint) -> uint { } let (port,chan) = Chan::new(); - do spawntask_later { + do spawn { chan.send(parfib(n-1)); }; let m2 = parfib(n-2); diff --git a/src/test/bench/shootout-spectralnorm.rs b/src/test/bench/shootout-spectralnorm.rs index 87cd01f9aad2e..8174347e3869e 100644 --- a/src/test/bench/shootout-spectralnorm.rs +++ b/src/test/bench/shootout-spectralnorm.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// xfail-test arcs no longer unwrap + extern mod extra; use std::from_str::FromStr; diff --git a/src/test/compile-fail/std-uncopyable-atomics.rs b/src/test/compile-fail/std-uncopyable-atomics.rs index a46dec7830a29..57c66974fcd01 100644 --- a/src/test/compile-fail/std-uncopyable-atomics.rs +++ b/src/test/compile-fail/std-uncopyable-atomics.rs @@ -12,7 +12,7 @@ #[feature(globs)]; -use std::unstable::atomics::*; +use std::sync::atomics::*; use std::ptr; fn main() { diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/Makefile b/src/test/run-make/bootstrap-from-c-with-green/Makefile similarity index 100% rename from src/test/run-make/bootstrap-from-c-with-uvio/Makefile rename to src/test/run-make/bootstrap-from-c-with-green/Makefile diff --git a/src/test/run-pass/rt-start-main-thread.rs b/src/test/run-make/bootstrap-from-c-with-green/lib.rs similarity index 66% rename from src/test/run-pass/rt-start-main-thread.rs rename to src/test/run-make/bootstrap-from-c-with-green/lib.rs index 47a723ce6e14d..9a03c772f3a3f 100644 --- a/src/test/run-pass/rt-start-main-thread.rs +++ b/src/test/run-make/bootstrap-from-c-with-green/lib.rs @@ -8,14 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// xfail-fast +#[crate_id="boot#0.1"]; +#[crate_type="lib"]; +#[no_uv]; -#[start] -fn start(argc: int, argv: **u8) -> int { - do std::rt::start_on_main_thread(argc, argv) { - info!("running on main thread"); +extern mod rustuv; +extern mod green; + +#[no_mangle] // this needs to get called from C +pub extern "C" fn foo(argc: int, argv: **u8) -> int { + do green::start(argc, argv) { do spawn { - info!("running on another thread"); + println!("hello"); } } } diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/main.c b/src/test/run-make/bootstrap-from-c-with-green/main.c similarity index 100% rename from src/test/run-make/bootstrap-from-c-with-uvio/main.c rename to src/test/run-make/bootstrap-from-c-with-green/main.c diff --git a/src/test/run-make/bootstrap-from-c-with-native/Makefile b/src/test/run-make/bootstrap-from-c-with-native/Makefile new file mode 100644 index 0000000000000..7f466573da730 --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-native/Makefile @@ -0,0 +1,9 @@ +-include ../tools.mk + +all: + $(RUSTC) lib.rs -Z gen-crate-map + ln -nsf $(call DYLIB,boot-*) $(call DYLIB,boot) + $(CC) main.c -o $(call RUN,main) -lboot -Wl,-rpath,$(TMPDIR) + $(call RUN,main) + rm $(call DYLIB,boot) + $(call FAIL,main) diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/lib.rs b/src/test/run-make/bootstrap-from-c-with-native/lib.rs similarity index 89% rename from src/test/run-make/bootstrap-from-c-with-uvio/lib.rs rename to src/test/run-make/bootstrap-from-c-with-native/lib.rs index 06a06c967f4fb..d0639d45fa569 100644 --- a/src/test/run-make/bootstrap-from-c-with-uvio/lib.rs +++ b/src/test/run-make/bootstrap-from-c-with-native/lib.rs @@ -10,14 +10,13 @@ #[crate_id="boot#0.1"]; #[crate_type="lib"]; +#[no_uv]; -extern mod rustuv; // pull in uvio - -use std::rt; +extern mod native; #[no_mangle] // this needs to get called from C pub extern "C" fn foo(argc: int, argv: **u8) -> int { - do rt::start(argc, argv) { + do native::start(argc, argv) { do spawn { println!("hello"); } diff --git a/src/test/run-pass/native-print-no-uv.rs b/src/test/run-make/bootstrap-from-c-with-native/main.c similarity index 75% rename from src/test/run-pass/native-print-no-uv.rs rename to src/test/run-make/bootstrap-from-c-with-native/main.c index 
d3b6d6059849d..1872c1ea43b11 100644 --- a/src/test/run-pass/native-print-no-uv.rs +++ b/src/test/run-make/bootstrap-from-c-with-native/main.c @@ -8,10 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// xfail-fast +// this is the rust entry point that we're going to call. +int foo(int argc, char *argv[]); -#[no_uv]; - -fn main() { - println!("hello"); +int main(int argc, char *argv[]) { + return foo(argc, argv); } diff --git a/src/test/run-pass/core-rt-smoke.rs b/src/test/run-pass/core-rt-smoke.rs deleted file mode 100644 index 6e3d9629da043..0000000000000 --- a/src/test/run-pass/core-rt-smoke.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// xfail-fast - -// A simple test of starting the runtime manually - -#[start] -fn start(argc: int, argv: **u8) -> int { - do std::rt::start(argc, argv) { - info!("creating my own runtime is joy"); - } -} diff --git a/src/test/run-pass/spawning-with-debug.rs b/src/test/run-pass/spawning-with-debug.rs index 76975d15c1d0c..f8094f9fdb9e8 100644 --- a/src/test/run-pass/spawning-with-debug.rs +++ b/src/test/run-pass/spawning-with-debug.rs @@ -17,6 +17,5 @@ use std::task; fn main() { let mut t = task::task(); - t.sched_mode(task::SingleThreaded); t.spawn(proc() ()); } diff --git a/src/test/run-pass/use.rs b/src/test/run-pass/use.rs index ddd4b10fd5c95..013487e580381 100644 --- a/src/test/run-pass/use.rs +++ b/src/test/run-pass/use.rs @@ -10,6 +10,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// xfail-fast + #[allow(unused_imports)]; #[no_std]; @@ -25,4 +27,5 @@ mod baz { pub use x = std::str; } -pub fn main() { } +#[start] +pub fn start(_: int, _: **u8) -> int { 0 }
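Taken together, the test changes show the new bootstrapping story: with the #[lang="start"] shim gone from libstd, a program (or embedding C code) links a runtime crate and starts it explicitly. A sketch combining the two patterns exercised by the run-make tests above; only the greeting string is invented here:

extern mod native;

#[start]
fn start(argc: int, argv: **u8) -> int {
    // native::start runs the closure as the initial task on a native (1:1)
    // thread, just as the bootstrap-from-c-with-native test invokes it from C.
    do native::start(argc, argv) {
        do spawn {
            println!("hello from a 1:1 task");
        }
    }
}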